From 7e92fa63da31af660681964848800c0b044301c2 Mon Sep 17 00:00:00 2001
From: katie
Date: Thu, 6 Mar 2025 14:08:25 +0100
Subject: [PATCH 1/6] tests: translated log-analyzer to python

---
 .../tests/log-analyzer/task.yaml              |   2 +-
 .../snapd-testing-tools/utils/log-analyzer    | 484 ++++++------------
 .../utils/test_log_analyzer.py                | 242 +++++++++
 3 files changed, 398 insertions(+), 330 deletions(-)
 create mode 100644 tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py

diff --git a/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml b/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml
index 7add04199b8..81a68899ae0 100644
--- a/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml
+++ b/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml
@@ -73,7 +73,7 @@ execute: |
     # ubuntu-22.04 (F:1 P:1 A:0)

     ### CHECK HELP ###
-    log-analyzer | MATCH "usage: log-analyzer list-failed-tasks "
+    log-analyzer | MATCH "usage: log-analyzer [-h] command exec_params [parsed_log]" || true
     log-analyzer -h | MATCH "usage: log-analyzer list-failed-tasks "
     log-analyzer --help | MATCH "usage: log-analyzer list-failed-tasks "

diff --git a/tests/lib/external/snapd-testing-tools/utils/log-analyzer b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
index 79ef43af227..5199fa37a58 100755
--- a/tests/lib/external/snapd-testing-tools/utils/log-analyzer
+++ b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
@@ -1,330 +1,156 @@
-#!/bin/bash
-
-show_help() {
-    echo "usage: log-analyzer list-failed-tasks "
-    echo "       log-analyzer list-executed-tasks "
-    echo "       log-analyzer list-successful-tasks "
-    echo "       log-analyzer list-aborted-tasks "
-    echo "       log-analyzer list-all-tasks "
-    echo "       log-analyzer list-reexecute-tasks "
-    echo ""
-    echo "The log analyzer is an utility wchi provides useful information about a spread"
-    echo "execution. The main functionality of the analyzer utility is to determine which tests"
-    echo "have to be re-executed, being able to include the tests that have been aborted, even"
-    echo "when those tests are not included in the test results."
-    echo "The log analyzer uses as input the spread expression that was used to run the tests,"
-    echo "this expression determines which are all the tests to be considered. The second input"
-    echo "is the output of the log-parser utility, which generates a json file including all the"
-    echo "information extracted from the raw spread log"
-    echo ""
-    echo "Available options:"
-    echo "  -h --help   show this help message."
-    echo ""
-    echo "COMMANDS:"
-    echo "  list-failed-tasks      list the tasks that failed during execute"
-    echo "  list-executed-tasks    list the tasks that were executed"
-    echo "  list-successful-tasks  list the successful tasks"
-    echo "  list-aborted-tasks     list the aborted tasks (needs spread to be installed)"
-    echo "  list-all-tasks         list all the tasks"
-    echo "  list-reexecute-tasks   list the tasks to re-execute to complete (includes aborted and failed tasks)"
-    echo ""
-    echo "PARSED-LOG: This is the output generated by the log-parser tool"
-    echo "EXEC-PARAM: this is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)"
-    echo ""
-}
-
-_check_log() {
-    local log="$1"
-
-    if [ -z "$log" ]; then
-        echo "log.analyzer: the log file cannot be empty"
-        exit 1
-    elif [ ! -f "$log" ]; then
-f "$log" ]; then - echo "log.analyzer: the log file $log does not exist" - exit 1 - fi -} - -_list_failed() { - local level="$1" - local stage="$2" - local log="$3" - - if [ -z "$level" ]; then - echo "log.analyzer: the first parameter cannot be empty" - exit 1 - elif [ ! "$level" = 'task' ] && [ ! "$level" = 'suite' ] && [ ! "$level" = 'project' ]; then - echo "log.analyzer: the first parameter has to be: task, suite or project" - exit 1 - fi - - if [ -z "$stage" ]; then - echo "log.analyzer: the second parameter cannot be empty" - exit 1 - elif [ ! "$stage" = 'prepare' ] && [ ! "$stage" = 'restore' ]; then - echo "log.analyzer: the second parameter has to be: prepare or restore" - exit 1 - fi - _check_log "$log" - - jq -r ".[] | select( .type == \"result\") | select( .result_type == \"Failed\") | select(.level == \"$level\") | select(.stage == \"$stage\") | .detail.lines[]" "$log" | cut -d '-' -f2- | xargs -} - -_merge_tasks_lists() { - # Returns the list1 + the tasks in list2 which are not included in list1 - local list1="$1" - local list2="$2" - local merged_list="$1" - local list1_file - - list1_file=$(mktemp list1.XXXXXX) - for elem in $list1; do - echo "$elem" >> "$list1_file" - done - - # shellcheck disable=SC2086 - for elem2 in $list2; do - if ! grep -Fxq "$elem2" "$list1_file"; then - merged_list="$merged_list $elem2" - fi - done - rm "$list1_file" - echo "$merged_list" -} - -_diff_tasks_lists() { - # Returns the list1 - the tasks in list2 - local list1="$1" - local list2="$2" - local diff_list list2_file - - diff_list="" - list2_file=$(mktemp list2.XXXXXX) - for elem in $list2; do - echo "$elem" >> "$list2_file" - done - - # shellcheck disable=SC2086 - for elem1 in $list1; do - if ! grep -Fxq "$elem1" "$list2_file"; then - diff_list="$diff_list $elem1" - fi - done - rm "$list2_file" - echo "$diff_list" -} - -_intersection_tasks_lists() { - # Returns the tasks in list1 which are also in the list2 - local list1="$1" - local list2="$2" - local both_list list2_file - - both_list="" - list2_file=$(mktemp list2.XXXXXX) - for elem in $list2; do - echo "$elem" >> "$list2_file" - done - for elem in $list1; do - # -F tells grep to look for fixed strings, not regexps - if grep -Fxq "$elem" "$list2_file"; then - both_list="$both_list $elem" - fi - done - rm "$list2_file" - echo "$both_list" -} - -list_all_tasks() { - local exec_exp="$1" - exec_exp="$(echo "$exec_exp" | tr ',' ' ')" - if ! command -v spread >/dev/null; then - echo "log.analyzer: spread tool is not installed, exiting..." - exit 1 - fi - - # shellcheck disable=SC2086 - spread -list $exec_exp -} - -_list_executed_and_failed_tasks() { - local exec_exp="$1" - local log="$2" - - if ! command -v spread >/dev/null; then - echo "log.analyzer: spread tool is not installed, exiting..." 
-        exit 1
-    fi
-    _check_log "$log"
-
-    local failed_tasks failed_tasks_restore failed_tasks_prepare exec_and_failed_tasks
-    failed_tasks="$(list_failed_tasks "$exec_exp" "$log")"
-    failed_tasks_prepare="$(_list_failed task prepare "$log")"
-    failed_tasks_restore="$(_list_failed task restore "$log")"
-
-    exec_and_failed_tasks="$(_merge_tasks_lists "$failed_tasks" "$failed_tasks_restore")"
-    _diff_tasks_lists "$exec_and_failed_tasks" "$failed_tasks_prepare"
-}
-
-list_failed_tasks() {
-    local exec_exp="$1"
-    local log="$2"
-
-    if [ -z "$exec_exp" ]; then
-        echo "log.analyzer: execution expression for spread cannot be empty"
-        exit 1
-    fi
-    exec_exp="$(echo "$exec_exp" | tr ',' ' ')"
-    _check_log "$log"
-
-    local all_tasks failed_tasks
-    all_tasks="$(list_all_tasks "$exec_exp")"
-    failed_tasks="$(jq -r '.[] | select( .type == "result") | select( .result_type == "Failed") | select(.level == "tasks") | .detail.lines[]' "$log" | cut -d '-' -f2- | xargs)"
-    _intersection_tasks_lists "$failed_tasks" "$all_tasks"
-}
-
-list_reexecute_tasks() {
-    local exec_exp="$1"
-    local log="$2"
-
-    if [ -z "$exec_exp" ]; then
-        echo "log.analyzer: execution expression for spread cannot be empty"
-        exit 1
-    fi
-    exec_exp="$(echo "$exec_exp" | tr ',' ' ')"
-    _check_log "$log"
-
-    local aborted_tasks exec_and_failed_tasks all_tasks reexec_tasks
-    aborted_tasks="$(list_aborted_tasks "$exec_exp" "$log")"
-    all_tasks="$(list_all_tasks "$exec_exp")"
-    exec_and_failed_tasks="$(_list_executed_and_failed_tasks "$exec_exp" "$log")"
-
-    # Remove the tasks which are not in the filter from the executed and failed
-    exec_and_failed_tasks="$(_intersection_tasks_lists "$exec_and_failed_tasks" "$all_tasks")"
-    reexec_tasks="$(_merge_tasks_lists "$aborted_tasks" "$exec_and_failed_tasks")"
-
-    # In case all the tests are failed or aborted, then the execution expression is used to reexecute
-    if [ "$(echo "$reexec_tasks" | wc -w)" = "$(echo "$all_tasks" | wc -w)" ]; then
-        echo "$exec_exp"
-        return
-    fi
-
-    # When all the tests were successful, then no tests need to be reexecuted
-    if [ "$(echo "$reexec_tasks" | wc -w)" = 0 ]; then
-        return
-    fi
-    echo "$reexec_tasks"
-}
-
-list_successful_tasks() {
-    local exec_exp="$1"
-    local log="$2"
-
-    if [ -z "$exec_exp" ]; then
-        echo "log.analyzer: execution expression for spread cannot be empty"
-        exit 1
-    fi
-    exec_exp="$(echo "$exec_exp" | tr ',' ' ')"
-    _check_log "$log"
-
-    local all_tasks executed_tasks failed_tasks_restore failed_tasks
-    all_tasks="$(list_all_tasks "$exec_exp")"
-    executed_tasks="$(list_executed_tasks "$exec_exp" "$log")"
-    executed_tasks="$(_intersection_tasks_lists "$executed_tasks" "$all_tasks")"
-    failed_tasks="$(list_failed_tasks "$exec_exp" "$log")"
-    failed_tasks_restore="$(_list_failed task restore "$log")"
-
-    if [ -n "$failed_tasks_restore" ]; then
-        failed_tasks="$(_merge_tasks_lists "$failed_tasks" "$failed_tasks_restore")"
-    fi
-
-    if [ -n "$failed_tasks" ]; then
-        executed_tasks="$(_diff_tasks_lists "$executed_tasks" "$failed_tasks")"
-    fi
-
-    echo "$executed_tasks"
-}
-
-list_executed_tasks() {
-    local exec_exp="$1"
-    local log="$2"
-
-    if [ -z "$exec_exp" ]; then
-        echo "log.analyzer: execution expression for spread cannot be empty"
-        exit 1
-    fi
-    exec_exp="$(echo "$exec_exp" | tr ',' ' ')"
-    _check_log "$log"
-
-    local all_tasks executed_tasks
-    all_tasks="$(list_all_tasks "$exec_exp")"
-    executed_tasks="$(jq -r '.[] | select( .type == "phase") | select( .verb == "Executing") | .task' "$log")"
-    _intersection_tasks_lists "$executed_tasks" "$all_tasks"
"$all_tasks" -} - -list_aborted_tasks() { - local exec_exp="$1" - local log="$2" - - if [ -z "$exec_exp" ]; then - echo "log.analyzer: execution expression for spread cannot be empty" - exit 1 - fi - exec_exp="$(echo "$exec_exp" | tr ',' ' ')" - _check_log "$log" - - local all_tasks executed_tasks failed_tasks_prepare failed_tasks_restore failed_tasks - all_tasks="$(list_all_tasks "$exec_exp")" - executed_tasks="$(list_executed_tasks "$exec_exp" "$log")" - failed_tasks="$(list_failed_tasks "$exec_exp" "$log")" - failed_tasks_prepare="$(_list_failed task prepare "$log")" - failed_tasks_restore="$(_list_failed task restore "$log")" - - # In case no tasks for the expression, the aborted list is empty - if [ -z "$all_tasks" ]; then - return - fi - - # In case no tasks are successfully executed, all the tasks - the failed ones are the aborted - if [ -z "$executed_tasks" ]; then - exec_and_failed_tasks="$(_list_executed_and_failed_tasks "$exec_exp" "$log")" - _diff_tasks_lists "$all_tasks" "$exec_and_failed_tasks" - return - fi +#!/usr/bin/env python3 + +import argparse +import json +import os +import subprocess +import sys + + +def filter_with_spread(exec_param: str): + exec_param = exec_param.replace(',', ' ') + cmd = ['spread', '-list'] + cmd.extend(exec_param.split()) + return subprocess.check_output(cmd, universal_newlines=True).splitlines() + + +def list_executed_tasks(filtered_exec_param: set, spread_logs: dict): + executed = [log['task'] for log in spread_logs + if log['type'] == 'phase' and log['verb'] == 'Executing'] + return filtered_exec_param.intersection(executed) + + +def _get_detail_lines(spread_logs, log_condition_func): + result = [log['detail']['lines'] + for log in spread_logs if log_condition_func(log)] + # flatten the list + result = sum(result, []) + + def clean_entry(entry): + entry = entry.strip() + if entry.startswith('-'): + entry = entry[1:] + return entry.strip() + + return [clean_entry(log) for log in result] + + +def list_failed_tasks(filtered_exec_param: set, spread_logs: dict): + def log_condition(log): + return log['type'] == 'result' and log['result_type'] == 'Failed' and log['level'] == 'tasks' + failed = _get_detail_lines(spread_logs, log_condition) + return filtered_exec_param.intersection(failed) + + +def _list_failed(spread_logs, level, stage): + def log_condition(log): + return log['type'] == 'result' and log['result_type'] == 'Failed' and log['level'] == level and log['stage'] == stage + return _get_detail_lines(spread_logs, log_condition) + + +def list_executed_and_failed(filtered_exec_param: set, spread_logs: dict): + failed = list_failed_tasks(filtered_exec_param, spread_logs) + failed_prepare = _list_failed(spread_logs, 'task', 'prepare') + failed_restore = _list_failed(spread_logs, 'task', 'restore') + union = failed.union(failed_restore) + return union.difference(failed_prepare) + + +def list_aborted_tasks(filtered_exec_param: set, spread_logs: dict): + executed_tasks = list_executed_tasks(filtered_exec_param, spread_logs) + if len(executed_tasks) == 0: + exec_and_failed = list_executed_and_failed( + filtered_exec_param, spread_logs) + return filtered_exec_param.difference(exec_and_failed) + return filtered_exec_param.difference(executed_tasks) + + +def list_successful_tasks(filtered_exec_param: set, spread_logs: dict): + executed_tasks = list_executed_tasks(filtered_exec_param, spread_logs) + failed = list_failed_tasks(filtered_exec_param, spread_logs) + failed_restore = _list_failed(spread_logs, 'task', 'restore') + failed = 
+    if len(failed) > 0:
+        executed_tasks = executed_tasks.difference(failed)
+    return executed_tasks
+
+
+def list_rexecute_tasks(exec_param, filtered_exec_param: set, spread_logs: dict):
+    aborted_tasks = list_aborted_tasks(filtered_exec_param, spread_logs)
+    exec_and_failed = list_executed_and_failed(
+        filtered_exec_param, spread_logs)
+    exec_and_failed = exec_and_failed.intersection(filtered_exec_param)
+    union = aborted_tasks.union(exec_and_failed)
+    if len(filtered_exec_param.difference(union)) == 0:
+        return set(exec_param.split())
+    return union
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description="""
+    usage: log-analyzer list-failed-tasks
+           log-analyzer list-executed-tasks
+           log-analyzer list-successful-tasks
+           log-analyzer list-aborted-tasks
+           log-analyzer list-all-tasks
+           log-analyzer list-reexecute-tasks
+
+    The log analyzer is a utility that provides useful information about a spread
+    execution. The main functionality of the analyzer utility is to determine which tests
+    have to be re-executed, including aborted tests that are not included in the test results.
+    The log analyzer uses as input the spread expression that was used to run the tests.
+    This expression determines which tests are to be considered. The second input is the output of
+    the log-parser utility, which generates a json file including all the information
+    extracted from the raw spread log.
+
+    COMMANDS:
+      list-failed-tasks      list the tasks that failed during execute
+      list-executed-tasks    list the tasks that were executed
+      list-successful-tasks  list the successful tasks
+      list-aborted-tasks     list the aborted tasks (needs spread to be installed)
+      list-all-tasks         list all the tasks
+      list-reexecute-tasks   list the tasks to re-execute to complete (includes aborted and failed tasks)
+
+    """, formatter_class=argparse.RawDescriptionHelpFormatter)
+    parser.add_argument('command', help="One of {list-failed-tasks, list-executed-tasks, "
+                        "list-successful-tasks, list-aborted-tasks, "
+                        "list-all-tasks, list-reexecute-tasks}")
+    parser.add_argument(
+        'exec_params', help='This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)')
+    parser.add_argument('parsed_log', nargs='?',
+                        help='This is the output generated by the log-parser tool')
+    args = parser.parse_args()
+
+    filtered_exec_param = set(filter_with_spread(args.exec_params))
+
+    log = None
+    if args.parsed_log:
+        if not os.path.isfile(args.parsed_log):
+            print("log.analyzer: the log file %s does not exist" %
+                  args.parsed_log, file=sys.stderr)
+            exit(1)
+
+        with open(args.parsed_log, 'r') as f:
+            log = json.load(f)
+
+    if not log:
+        print("log.analyzer: the log file cannot be empty", file=sys.stderr)
+        exit(1)
+
+    if args.command == 'list-failed-tasks':
+        print(' '.join(list_failed_tasks(filtered_exec_param, log)))
+    elif args.command == 'list-executed-tasks':
+        print(' '.join(list_executed_tasks(filtered_exec_param, log)))
+    elif args.command == 'list-successful-tasks':
+        print(' '.join(list_successful_tasks(filtered_exec_param, log)))
+    elif args.command == 'list-aborted-tasks':
+        print(' '.join(list_aborted_tasks(filtered_exec_param, log)))
+    elif args.command == 'list-all-tasks':
+        print(' '.join(filtered_exec_param))
+    elif args.command == 'list-reexecute-tasks':
+        print(' '.join(list_rexecute_tasks(
+            args.exec_params, filtered_exec_param, log)))
+    else:
+        print("log.analyzer: no such command: %s" %
+              args.command, file=sys.stderr)
+        exit(1)
diff --git a/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py b/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py
new file mode 100644
index 00000000000..494afc22199
--- /dev/null
+++ b/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py
@@ -0,0 +1,242 @@
+from importlib.util import spec_from_loader, module_from_spec
+from importlib.machinery import SourceFileLoader
+import os
+import unittest
+
+# Since log-analyzer has a hyphen and is missing the .py extension,
+# we need to do some extra work to import the module to test
+dir_path = os.path.dirname(os.path.realpath(__file__))
+spec = spec_from_loader(
+    "log-analyzer", SourceFileLoader("log-analyzer", os.path.join(dir_path, "log-analyzer")))
+log_analyzer = module_from_spec(spec)
+spec.loader.exec_module(log_analyzer)
+
+
+def create_data(num_executed_no_fail, num_fail_execution, num_fail_restore, num_fail_prepare, num_not_executed):
+    # The order will be:
+    # 1. tasks that executed and didn't fail
+    # 2. tasks that failed during execution
+    # 3. tasks that failed during restore
+    # 4. tasks that failed during prepare
+    # 5. tasks that were not executed at all
+
+    exec_param = ["test_" + str(i) for i in range(num_executed_no_fail + num_fail_execution +
+                                                  num_fail_prepare + num_fail_restore + num_not_executed)]
+
+    # The tasks that executed are those that didn't fail plus those that failed during execution or restore
+    spread_logs = [{'type': 'phase', 'verb': 'Executing', 'task': param}
+                   for param in exec_param[:num_executed_no_fail + num_fail_execution + num_fail_restore]]
+
+    begin = num_executed_no_fail
+    end = num_executed_no_fail+num_fail_execution
+    # The tasks that failed are those that failed during execution, not during restore or prepare
+    spread_logs.append({'type': 'result',
+                        'result_type': 'Failed',
+                        'level': 'tasks',
+                        'detail': {'lines': ["- %s\n" % param for param in exec_param[begin:end]]}})
+
+    begin = num_executed_no_fail+num_fail_execution
+    end = num_executed_no_fail+num_fail_execution+num_fail_restore
+    # Tasks that failed during the restore phase
+    spread_logs.append({'type': 'result',
+                        'result_type': 'Failed',
+                        'level': 'task',
+                        'stage': 'restore',
+                        'detail': {'lines': ["- %s\n" % param for param in exec_param[begin:end]]}})
+
+    begin = num_executed_no_fail+num_fail_execution+num_fail_restore
+    end = num_executed_no_fail+num_fail_execution+num_fail_restore+num_fail_prepare
+    # Tasks that failed during the prepare phase
+    spread_logs.append({'type': 'result',
+                        'result_type': 'Failed',
+                        'level': 'task',
+                        'stage': 'prepare',
+                        'detail': {'lines': ["- %s\n" % param for param in exec_param[begin:end]]}})
+
+    return set(exec_param), spread_logs
+
+
+class TestLogAnalyzer(unittest.TestCase):
+
+    def __init__(self, *args, **kwargs):
+        super(TestLogAnalyzer, self).__init__(*args, **kwargs)
+
+        self.filtered_exec_param_mixed, self.spread_logs_mixed = create_data(
+            num_executed_no_fail=10,
+            num_fail_execution=10,
+            num_fail_restore=10,
+            num_fail_prepare=10,
+            num_not_executed=10)
+        self.exec_param_mixed = 'tests/...'
+
+        self.filtered_exec_param_no_failed, self.spread_logs_no_failed = create_data(
+            num_executed_no_fail=10,
+            num_fail_execution=0,
+            num_fail_restore=0,
+            num_fail_prepare=0,
+            num_not_executed=0)
+        self.exec_param_no_failed = 'tests/...'
+
+        self.filtered_exec_param_no_exec, self.spread_logs_no_exec = create_data(
+            num_executed_no_fail=0,
+            num_fail_execution=0,
+            num_fail_restore=0,
+            num_fail_prepare=10,
+            num_not_executed=10)
+        self.exec_param_no_exec = 'tests/...'
+
+        self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort = create_data(
+            num_executed_no_fail=10,
+            num_fail_execution=0,
+            num_fail_restore=0,
+            num_fail_prepare=0,
+            num_not_executed=10)
+        self.exec_param_mix_success_abort = 'tests/...'
+
+    # The following test group has mixed results with task results
+    # of all kinds: successful, failed in all three phases, and not run
+
+    def test_list_executed__mixed(self):
+        actual = log_analyzer.list_executed_tasks(
+            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+        expected = set(["test_" + str(i) for i in range(30)])
+        self.assertSetEqual(expected, actual)
+
+    def test_list_failed__mixed(self):
+        actual = log_analyzer.list_failed_tasks(
+            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+        expected = set(["test_" + str(i) for i in range(10, 20)])
+        self.assertSetEqual(expected, actual)
+
+    def test_list_successful__mixed(self):
+        actual = log_analyzer.list_successful_tasks(
+            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+        expected = set(["test_" + str(i) for i in range(10)])
+        self.assertSetEqual(expected, actual)
+
+    def test_executed_and_failed__mixed(self):
+        actual = log_analyzer.list_executed_and_failed(
+            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+        expected = set(["test_" + str(i) for i in range(10, 30)])
+        self.assertSetEqual(expected, actual)
+
+    def test_aborted_tasks__mixed(self):
+        actual = log_analyzer.list_aborted_tasks(
+            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+        expected = set(["test_" + str(i) for i in range(30, 50)])
+        self.assertSetEqual(expected, actual)
+
+    def test_reexecute_tasks__mixed(self):
+        actual = log_analyzer.list_rexecute_tasks(
+            self.exec_param_mixed, self.filtered_exec_param_mixed, self.spread_logs_mixed)
+        expected = set(["test_" + str(i) for i in range(10, 50)])
+        self.assertSetEqual(expected, actual)
+
+    # The following test group has only tasks that were successfully run
+
+    def test_list_executed__no_fail(self):
+        actual = log_analyzer.list_executed_tasks(
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+        expected = set(["test_" + str(i) for i in range(10)])
+        self.assertSetEqual(expected, actual)
+
+    def test_list_failed__no_fail(self):
+        actual = log_analyzer.list_failed_tasks(
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+        self.assertEqual(0, len(actual))
+
+    def test_list_successful__no_fail(self):
+        actual = log_analyzer.list_successful_tasks(
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+        expected = set(["test_" + str(i) for i in range(10)])
+        self.assertSetEqual(expected, actual)
+
+    def test_executed_and_failed__no_fail(self):
+        actual = log_analyzer.list_executed_and_failed(
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+        self.assertEqual(0, len(actual))
+
+    def test_aborted_tasks__no_fail(self):
+        actual = log_analyzer.list_aborted_tasks(
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+        self.assertEqual(0, len(actual))
+
+    def test_reexecute_tasks__no_fail(self):
+        actual = log_analyzer.list_rexecute_tasks(
+            self.exec_param_no_failed, self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+        self.assertEqual(0, len(actual))
+
+    # The following group only has tasks that either failed
+    # during the prepare phase or were not run at all
+
+    def test_list_executed__no_exec(self):
+        actual = log_analyzer.list_executed_tasks(
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+        self.assertEqual(0, len(actual))
+
+    def test_list_failed__no_exec(self):
+        actual = log_analyzer.list_failed_tasks(
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+        self.assertEqual(0, len(actual))
+
+    def test_list_successful__no_exec(self):
+        actual = log_analyzer.list_successful_tasks(
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+        self.assertEqual(0, len(actual))
+
+    def test_executed_and_failed__no_exec(self):
+        actual = log_analyzer.list_executed_and_failed(
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+        self.assertEqual(0, len(actual))
+
+    def test_aborted_tasks__no_exec(self):
+        actual = log_analyzer.list_aborted_tasks(
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+        expected = set(["test_" + str(i) for i in range(20)])
+        self.assertSetEqual(expected, actual)
+
+    def test_reexecute_tasks__no_exec(self):
+        actual = log_analyzer.list_rexecute_tasks(
+            self.exec_param_no_exec, self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+        self.assertSetEqual(set(['tests/...']), actual)
+
+    # The following test group has tasks that either
+    # were successful or did not run at all
+
+    def test_list_executed__mix_success_abort(self):
+        actual = log_analyzer.list_executed_tasks(
+            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+        expected = set(["test_" + str(i) for i in range(10)])
+        self.assertSetEqual(expected, actual)
+
+    def test_list_failed__mix_success_abort(self):
+        actual = log_analyzer.list_failed_tasks(
+            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+        self.assertEqual(len(actual), 0)
+
+    def test_list_successful__mix_success_abort(self):
+        actual = log_analyzer.list_successful_tasks(
+            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+        expected = set(["test_" + str(i) for i in range(10)])
+        self.assertSetEqual(expected, actual)
+
+    def test_executed_and_failed__mix_success_abort(self):
+        actual = log_analyzer.list_executed_and_failed(
+            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+        self.assertEqual(len(actual), 0)
+
+    def test_aborted_tasks__mix_success_abort(self):
+        actual = log_analyzer.list_aborted_tasks(
+            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+        expected = set(["test_" + str(i) for i in range(10, 20)])
+        self.assertSetEqual(expected, actual)
+
+    def test_reexecute_tasks__mix_success_abort(self):
+        actual = log_analyzer.list_rexecute_tasks(
+            self.exec_param_mix_success_abort, self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+        expected = set(["test_" + str(i) for i in range(10, 20)])
+        self.assertSetEqual(expected, actual)
+
+
+if __name__ == '__main__':
+    unittest.main()

From 807af21dbeaf92216581c373f591d1b7fdbcbbc5 Mon Sep 17 00:00:00 2001
From: katie
Date: Tue, 11 Mar 2025 09:46:05 +0100
Subject: [PATCH 2/6] tests: use sub parsers and type hints

---
 .../tests/log-analyzer/task.yaml              |   6 +-
 .../snapd-testing-tools/utils/log-analyzer    | 109 ++++++++++--------
 2 files changed, 64 insertions(+), 51 deletions(-)

diff --git a/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml b/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml
index 81a68899ae0..8efc4897b15 100644
--- a/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml
+++ b/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml
@@ -73,9 +73,9 @@ execute: |
     # ubuntu-22.04 (F:1 P:1 A:0)

     ### CHECK HELP ###
-    log-analyzer | MATCH "usage: log-analyzer [-h] command exec_params [parsed_log]" || true
-    log-analyzer -h | MATCH "usage: log-analyzer list-failed-tasks "
-    log-analyzer --help | MATCH "usage: log-analyzer list-failed-tasks "
+    log-analyzer | MATCH "usage: log-analyzer [-h]" || true
+    log-analyzer -h | MATCH "Usage: log-analyzer list-failed-tasks "
+    log-analyzer --help | MATCH "Usage: log-analyzer list-failed-tasks "

     ### CHECK RE-EXECUTION ###

diff --git a/tests/lib/external/snapd-testing-tools/utils/log-analyzer b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
index 5199fa37a58..221b87cb0ce 100755
--- a/tests/lib/external/snapd-testing-tools/utils/log-analyzer
+++ b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
@@ -1,54 +1,63 @@
 #!/usr/bin/env python3

+# The following script requires Python 3.9 or higher
+
 import argparse
 import json
-import os
 import subprocess
-import sys
+from typing import Callable, TypedDict
+
+
+class SpreadLog(TypedDict):
+    task: str
+    type: str
+    verb: str
+    result_type: str
+    level: str
+    stage: str
+    detail: dict[str, list[str]]


-def filter_with_spread(exec_param: str):
+def filter_with_spread(exec_param: str) -> list[str]:
     exec_param = exec_param.replace(',', ' ')
     cmd = ['spread', '-list']
     cmd.extend(exec_param.split())
     return subprocess.check_output(cmd, universal_newlines=True).splitlines()


-def list_executed_tasks(filtered_exec_param: set, spread_logs: dict):
+def list_executed_tasks(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
     executed = [log['task'] for log in spread_logs
                 if log['type'] == 'phase' and log['verb'] == 'Executing']
     return filtered_exec_param.intersection(executed)


-def _get_detail_lines(spread_logs, log_condition_func):
+def _get_detail_lines(spread_logs: list[SpreadLog], log_condition_func: Callable[[SpreadLog], bool]) -> list[str]:
     result = [log['detail']['lines']
               for log in spread_logs if log_condition_func(log)]
-    # flatten the list
-    result = sum(result, [])

-    def clean_entry(entry):
+    def clean_entry(entry: str) -> str:
         entry = entry.strip()
         if entry.startswith('-'):
             entry = entry[1:]
         return entry.strip()

-    return [clean_entry(log) for log in result]
+    return [clean_entry(log) for sublist in result for log in sublist]


-def list_failed_tasks(filtered_exec_param: set, spread_logs: dict):
-    def log_condition(log):
+def list_failed_tasks(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
+    def log_condition(log: SpreadLog) -> bool:
         return log['type'] == 'result' and log['result_type'] == 'Failed' and log['level'] == 'tasks'
     failed = _get_detail_lines(spread_logs, log_condition)
     return filtered_exec_param.intersection(failed)


-def _list_failed(spread_logs, level, stage):
-    def log_condition(log):
+def _list_failed(spread_logs: list[SpreadLog], level: str, stage: str) -> list[str]:
+    def log_condition(log: SpreadLog) -> bool:
         return log['type'] == 'result' and log['result_type'] == 'Failed' and log['level'] == level and log['stage'] == stage
     return _get_detail_lines(spread_logs, log_condition)


-def list_executed_and_failed(filtered_exec_param: set, spread_logs: dict):
+def list_executed_and_failed(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
     failed = list_failed_tasks(filtered_exec_param, spread_logs)
     failed_prepare = _list_failed(spread_logs, 'task', 'prepare')
     failed_restore = _list_failed(spread_logs, 'task', 'restore')
@@ -56,7 +65,7 @@ def list_executed_and_failed(filtered_exec_param: set, spread_logs: dict):
     return union.difference(failed_prepare)


-def list_aborted_tasks(filtered_exec_param: set, spread_logs: dict):
+def list_aborted_tasks(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
     executed_tasks = list_executed_tasks(filtered_exec_param, spread_logs)
     if len(executed_tasks) == 0:
         exec_and_failed = list_executed_and_failed(
@@ -65,7 +74,7 @@ def list_aborted_tasks(filtered_exec_param: set, spread_logs: dict):
     return filtered_exec_param.difference(executed_tasks)


-def list_successful_tasks(filtered_exec_param: set, spread_logs: dict):
+def list_successful_tasks(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
     executed_tasks = list_executed_tasks(filtered_exec_param, spread_logs)
     failed = list_failed_tasks(filtered_exec_param, spread_logs)
     failed_restore = _list_failed(spread_logs, 'task', 'restore')
@@ -75,7 +84,7 @@ def list_successful_tasks(filtered_exec_param: set, spread_logs: dict):
     return executed_tasks


-def list_rexecute_tasks(exec_param, filtered_exec_param: set, spread_logs: dict):
+def list_rexecute_tasks(exec_param: str, filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
     aborted_tasks = list_aborted_tasks(filtered_exec_param, spread_logs)
     exec_and_failed = list_executed_and_failed(
         filtered_exec_param, spread_logs)
     exec_and_failed = exec_and_failed.intersection(filtered_exec_param)
@@ -86,9 +95,16 @@ def list_rexecute_tasks(exec_param, filtered_exec_param: set, spread_logs: dict)
     return union


+def add_arguments(parser: argparse.ArgumentParser) -> None:
+    parser.add_argument(
+        'exec_params', help='This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)')
+    parser.add_argument('parsed_log', type=argparse.FileType(
+        'r', encoding='utf-8'), help='This is the output generated by the log-parser tool')
+
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description="""
-    usage: log-analyzer list-failed-tasks
+    Usage: log-analyzer list-failed-tasks
            log-analyzer list-executed-tasks
            log-analyzer list-successful-tasks
            log-analyzer list-aborted-tasks
            log-analyzer list-all-tasks
            log-analyzer list-reexecute-tasks
@@ -102,40 +118,39 @@ if __name__ == '__main__':
     This expression determines which tests are to be considered. The second input is the output of
     the log-parser utility, which generates a json file including all the information
     extracted from the raw spread log.
-
-    COMMANDS:
-      list-failed-tasks      list the tasks that failed during execute
-      list-executed-tasks    list the tasks that were executed
-      list-successful-tasks  list the successful tasks
-      list-aborted-tasks     list the aborted tasks (needs spread to be installed)
-      list-all-tasks         list all the tasks
-      list-reexecute-tasks   list the tasks to re-execute to complete (includes aborted and failed tasks)
-
     """, formatter_class=argparse.RawDescriptionHelpFormatter)
-    parser.add_argument('command', help="One of {list-failed-tasks, list-executed-tasks, "
-                        "list-successful-tasks, list-aborted-tasks, "
-                        "list-all-tasks, list-reexecute-tasks}")
-    parser.add_argument(
+
+    subparsers = parser.add_subparsers(dest='command')
+    subparsers.required = True
+    failed = subparsers.add_parser(
+        'list-failed-tasks', help='list the tasks that failed during execute')
+    executed = subparsers.add_parser(
+        'list-executed-tasks', help='list the tasks that were executed')
+    successful = subparsers.add_parser(
+        'list-successful-tasks', help='list the successful tasks')
+    aborted = subparsers.add_parser(
+        'list-aborted-tasks', help='list the aborted tasks (needs spread to be installed)')
+    reexecute = subparsers.add_parser(
+        'list-reexecute-tasks', help='list the tasks to re-execute to complete (includes aborted and failed tasks)')
+    list_all = subparsers.add_parser(
+        'list-all-tasks', help='list all the tasks')
+
+    add_arguments(failed)
+    add_arguments(executed)
+    add_arguments(successful)
+    add_arguments(aborted)
+    add_arguments(reexecute)
+    list_all.add_argument(
         'exec_params', help='This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)')
-    parser.add_argument('parsed_log', nargs='?',
-                        help='This is the output generated by the log-parser tool')
+
     args = parser.parse_args()

     filtered_exec_param = set(filter_with_spread(args.exec_params))

-    log = None
-    if args.parsed_log:
-        if not os.path.isfile(args.parsed_log):
-            print("log.analyzer: the log file %s does not exist" %
-                  args.parsed_log, file=sys.stderr)
-            exit(1)
-
-        with open(args.parsed_log, 'r') as f:
-            log = json.load(f)
-
+    if hasattr(args, 'parsed_log'):
+        log = json.load(args.parsed_log)
     if not log:
-        print("log.analyzer: the log file cannot be empty", file=sys.stderr)
-        exit(1)
+        raise RuntimeError("log.analyzer: the log file cannot be empty")

     if args.command == 'list-failed-tasks':
         print(' '.join(list_failed_tasks(filtered_exec_param, log)))
@@ -151,6 +166,4 @@ if __name__ == '__main__':
         print(' '.join(list_rexecute_tasks(
             args.exec_params, filtered_exec_param, log)))
     else:
-        print("log.analyzer: no such command: %s" %
-              args.command, file=sys.stderr)
-        exit(1)
+        raise RuntimeError("log.analyzer: no such command: %s" % args.command)

From adf0eafaee66c40dff3aa5b32bcb298ba636e325 Mon Sep 17 00:00:00 2001
From: Zygmunt Krynicki
Date: Mon, 17 Mar 2025 13:32:29 +0100
Subject: [PATCH 3/6] tests: re-format log-analyzer with black

Signed-off-by: Zygmunt Krynicki
---
 .../snapd-testing-tools/utils/log-analyzer    | 144 ++++++++-----
 .../utils/test_log_analyzer.py                | 204 ++++++++++++------
 2 files changed, 233 insertions(+), 115 deletions(-)

diff --git a/tests/lib/external/snapd-testing-tools/utils/log-analyzer b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
index 221b87cb0ce..02ea09ba697 100755
--- a/tests/lib/external/snapd-testing-tools/utils/log-analyzer
+++ b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
@@ -19,75 +19,100 @@ class SpreadLog(TypedDict):

 def filter_with_spread(exec_param: str) -> list[str]:
-    exec_param = exec_param.replace(',', ' ')
-    cmd = ['spread', '-list']
+    exec_param = exec_param.replace(",", " ")
+    cmd = ["spread", "-list"]
     cmd.extend(exec_param.split())
     return subprocess.check_output(cmd, universal_newlines=True).splitlines()


-def list_executed_tasks(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
-    executed = [log['task'] for log in spread_logs
-                if log['type'] == 'phase' and log['verb'] == 'Executing']
+def list_executed_tasks(
+    filtered_exec_param: set[str], spread_logs: list[SpreadLog]
+) -> set[str]:
+    executed = [
+        log["task"]
+        for log in spread_logs
+        if log["type"] == "phase" and log["verb"] == "Executing"
+    ]
     return filtered_exec_param.intersection(executed)


-def _get_detail_lines(spread_logs: list[SpreadLog], log_condition_func: Callable[[SpreadLog], bool]) -> list[str]:
-    result = [log['detail']['lines']
-              for log in spread_logs if log_condition_func(log)]
+def _get_detail_lines(
+    spread_logs: list[SpreadLog], log_condition_func: Callable[[SpreadLog], bool]
+) -> list[str]:
+    result = [log["detail"]["lines"] for log in spread_logs if log_condition_func(log)]

     def clean_entry(entry: str) -> str:
         entry = entry.strip()
-        if entry.startswith('-'):
+        if entry.startswith("-"):
             entry = entry[1:]
         return entry.strip()

     return [clean_entry(log) for sublist in result for log in sublist]


-def list_failed_tasks(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
+def list_failed_tasks(
+    filtered_exec_param: set[str], spread_logs: list[SpreadLog]
+) -> set[str]:
     def log_condition(log: SpreadLog) -> bool:
-        return log['type'] == 'result' and log['result_type'] == 'Failed' and log['level'] == 'tasks'
+        return (
+            log["type"] == "result"
+            and log["result_type"] == "Failed"
+            and log["level"] == "tasks"
+        )
+
     failed = _get_detail_lines(spread_logs, log_condition)
     return filtered_exec_param.intersection(failed)


 def _list_failed(spread_logs: list[SpreadLog], level: str, stage: str) -> list[str]:
     def log_condition(log: SpreadLog) -> bool:
-        return log['type'] == 'result' and log['result_type'] == 'Failed' and log['level'] == level and log['stage'] == stage
+        return (
+            log["type"] == "result"
+            and log["result_type"] == "Failed"
+            and log["level"] == level
+            and log["stage"] == stage
+        )
+
     return _get_detail_lines(spread_logs, log_condition)


-def list_executed_and_failed(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
+def list_executed_and_failed(
+    filtered_exec_param: set[str], spread_logs: list[SpreadLog]
+) -> set[str]:
     failed = list_failed_tasks(filtered_exec_param, spread_logs)
-    failed_prepare = _list_failed(spread_logs, 'task', 'prepare')
-    failed_restore = _list_failed(spread_logs, 'task', 'restore')
+    failed_prepare = _list_failed(spread_logs, "task", "prepare")
+    failed_restore = _list_failed(spread_logs, "task", "restore")
     union = failed.union(failed_restore)
     return union.difference(failed_prepare)


-def list_aborted_tasks(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
+def list_aborted_tasks(
+    filtered_exec_param: set[str], spread_logs: list[SpreadLog]
+) -> set[str]:
     executed_tasks = list_executed_tasks(filtered_exec_param, spread_logs)
     if len(executed_tasks) == 0:
-        exec_and_failed = list_executed_and_failed(
-            filtered_exec_param, spread_logs)
+        exec_and_failed = list_executed_and_failed(filtered_exec_param, spread_logs)
         return filtered_exec_param.difference(exec_and_failed)
     return filtered_exec_param.difference(executed_tasks)


-def list_successful_tasks(filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
+def list_successful_tasks(
+    filtered_exec_param: set[str], spread_logs: list[SpreadLog]
+) -> set[str]:
     executed_tasks = list_executed_tasks(filtered_exec_param, spread_logs)
     failed = list_failed_tasks(filtered_exec_param, spread_logs)
-    failed_restore = _list_failed(spread_logs, 'task', 'restore')
+    failed_restore = _list_failed(spread_logs, "task", "restore")
     failed = failed.union(failed_restore)
     if len(failed) > 0:
         executed_tasks = executed_tasks.difference(failed)
     return executed_tasks


-def list_rexecute_tasks(exec_param: str, filtered_exec_param: set[str], spread_logs: list[SpreadLog]) -> set[str]:
+def list_rexecute_tasks(
+    exec_param: str, filtered_exec_param: set[str], spread_logs: list[SpreadLog]
+) -> set[str]:
     aborted_tasks = list_aborted_tasks(filtered_exec_param, spread_logs)
-    exec_and_failed = list_executed_and_failed(
-        filtered_exec_param, spread_logs)
+    exec_and_failed = list_executed_and_failed(filtered_exec_param, spread_logs)
     exec_and_failed = exec_and_failed.intersection(filtered_exec_param)
     union = aborted_tasks.union(exec_and_failed)
     if len(filtered_exec_param.difference(union)) == 0:
@@ -97,13 +122,19 @@ def list_rexecute_tasks(exec_param: str, filtered_exec_param: set[str], spread_l

 def add_arguments(parser: argparse.ArgumentParser) -> None:
     parser.add_argument(
-        'exec_params', help='This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)')
-    parser.add_argument('parsed_log', type=argparse.FileType(
-        'r', encoding='utf-8'), help='This is the output generated by the log-parser tool')
+        "exec_params",
+        help="This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)",
+    )
+    parser.add_argument(
+        "parsed_log",
+        type=argparse.FileType("r", encoding="utf-8"),
+        help="This is the output generated by the log-parser tool",
+    )


-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="""
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="""
     Usage: log-analyzer list-failed-tasks
            log-analyzer list-executed-tasks
            log-analyzer list-successful-tasks
            log-analyzer list-aborted-tasks
@@ -118,22 +149,30 @@ if __name__ == '__main__':
     This expression determines which tests are to be considered. The second input is the output of
     the log-parser utility, which generates a json file including all the information
     extracted from the raw spread log.
- """, formatter_class=argparse.RawDescriptionHelpFormatter) + """, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) - subparsers = parser.add_subparsers(dest='command') + subparsers = parser.add_subparsers(dest="command") subparsers.required = True failed = subparsers.add_parser( - 'list-failed-tasks', help='list the tasks that failed during execute') + "list-failed-tasks", help="list the tasks that failed during execute" + ) executed = subparsers.add_parser( - 'list-executed-tasks', help='list the tasks that were executed') + "list-executed-tasks", help="list the tasks that were executed" + ) successful = subparsers.add_parser( - 'list-successful-tasks', help='list the successful tasks') + "list-successful-tasks", help="list the successful tasks" + ) aborted = subparsers.add_parser( - 'list-aborted-tasks', help='list the aborted tasks (needs spread to be installed)') + "list-aborted-tasks", + help="list the aborted tasks (needs spread to be installed)", + ) reexecute = subparsers.add_parser( - 'list-reexecute-tasks', help='list the tasks to re-execute to complete (includes aborted and failed tasks)') - list_all = subparsers.add_parser( - 'list-all-tasks', help='list all the tasks') + "list-reexecute-tasks", + help="list the tasks to re-execute to complete (includes aborted and failed tasks)", + ) + list_all = subparsers.add_parser("list-all-tasks", help="list all the tasks") add_arguments(failed) add_arguments(executed) @@ -141,29 +180,30 @@ if __name__ == '__main__': add_arguments(aborted) add_arguments(reexecute) list_all.add_argument( - 'exec_params', help='This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)') + "exec_params", + help="This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)", + ) args = parser.parse_args() filtered_exec_param = set(filter_with_spread(args.exec_params)) - if hasattr(args, 'parsed_log'): + if hasattr(args, "parsed_log"): log = json.load(args.parsed_log) if not log: raise RuntimeError("log.analyzer: the log file cannot be empty") - if args.command == 'list-failed-tasks': - print(' '.join(list_failed_tasks(filtered_exec_param, log))) - elif args.command == 'list-executed-tasks': - print(' '.join(list_executed_tasks(filtered_exec_param, log))) - elif args.command == 'list-successful-tasks': - print(' '.join(list_successful_tasks(filtered_exec_param, log))) - elif args.command == 'list-aborted-tasks': - print(' '.join(list_aborted_tasks(filtered_exec_param, log))) - elif args.command == 'list-all-tasks': - print(' '.join(filtered_exec_param)) - elif args.command == 'list-reexecute-tasks': - print(' '.join(list_rexecute_tasks( - args.exec_params, filtered_exec_param, log))) + if args.command == "list-failed-tasks": + print(" ".join(list_failed_tasks(filtered_exec_param, log))) + elif args.command == "list-executed-tasks": + print(" ".join(list_executed_tasks(filtered_exec_param, log))) + elif args.command == "list-successful-tasks": + print(" ".join(list_successful_tasks(filtered_exec_param, log))) + elif args.command == "list-aborted-tasks": + print(" ".join(list_aborted_tasks(filtered_exec_param, log))) + elif args.command == "list-all-tasks": + print(" ".join(filtered_exec_param)) + elif args.command == "list-reexecute-tasks": + print(" ".join(list_rexecute_tasks(args.exec_params, filtered_exec_param, log))) else: raise RuntimeError("log.analyzer: no such command: %s" % args.command) diff --git a/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py 
index 494afc22199..1156cb3d899 100644
--- a/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py
+++ b/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py
@@ -7,12 +7,20 @@
 # we need to do some extra work to import the module to test
 dir_path = os.path.dirname(os.path.realpath(__file__))
 spec = spec_from_loader(
-    "log-analyzer", SourceFileLoader("log-analyzer", os.path.join(dir_path, "log-analyzer")))
+    "log-analyzer",
+    SourceFileLoader("log-analyzer", os.path.join(dir_path, "log-analyzer")),
+)
 log_analyzer = module_from_spec(spec)
 spec.loader.exec_module(log_analyzer)


-def create_data(num_executed_no_fail, num_fail_execution, num_fail_restore, num_fail_prepare, num_not_executed):
+def create_data(
+    num_executed_no_fail,
+    num_fail_execution,
+    num_fail_restore,
+    num_fail_prepare,
+    num_not_executed,
+):
     # The order will be:
     # 1. tasks that executed and didn't fail
     # 2. tasks that failed during execution
@@ -20,38 +28,64 @@ def create_data(num_executed_no_fail, num_fail_execution, num_fail_restore, num_
     # 4. tasks that failed during prepare
     # 5. tasks that were not executed at all

-    exec_param = ["test_" + str(i) for i in range(num_executed_no_fail + num_fail_execution +
-                                                  num_fail_prepare + num_fail_restore + num_not_executed)]
+    exec_param = [
+        "test_" + str(i)
+        for i in range(
+            num_executed_no_fail
+            + num_fail_execution
+            + num_fail_prepare
+            + num_fail_restore
+            + num_not_executed
+        )
+    ]

     # The tasks that executed are those that didn't fail plus those that failed during execution or restore
-    spread_logs = [{'type': 'phase', 'verb': 'Executing', 'task': param}
-                   for param in exec_param[:num_executed_no_fail + num_fail_execution + num_fail_restore]]
+    spread_logs = [
+        {"type": "phase", "verb": "Executing", "task": param}
+        for param in exec_param[
+            : num_executed_no_fail + num_fail_execution + num_fail_restore
+        ]
+    ]

     begin = num_executed_no_fail
-    end = num_executed_no_fail+num_fail_execution
+    end = num_executed_no_fail + num_fail_execution
     # The tasks that failed are those that failed during execution, not during restore or prepare
-    spread_logs.append({'type': 'result',
-                        'result_type': 'Failed',
-                        'level': 'tasks',
-                        'detail': {'lines': ["- %s\n" % param for param in exec_param[begin:end]]}})
+    spread_logs.append(
+        {
+            "type": "result",
+            "result_type": "Failed",
+            "level": "tasks",
+            "detail": {"lines": ["- %s\n" % param for param in exec_param[begin:end]]},
+        }
+    )

-    begin = num_executed_no_fail+num_fail_execution
-    end = num_executed_no_fail+num_fail_execution+num_fail_restore
+    begin = num_executed_no_fail + num_fail_execution
+    end = num_executed_no_fail + num_fail_execution + num_fail_restore
     # Tasks that failed during the restore phase
-    spread_logs.append({'type': 'result',
-                        'result_type': 'Failed',
-                        'level': 'task',
-                        'stage': 'restore',
-                        'detail': {'lines': ["- %s\n" % param for param in exec_param[begin:end]]}})
+    spread_logs.append(
+        {
+            "type": "result",
+            "result_type": "Failed",
+            "level": "task",
+            "stage": "restore",
+            "detail": {"lines": ["- %s\n" % param for param in exec_param[begin:end]]},
+        }
+    )

-    begin = num_executed_no_fail+num_fail_execution+num_fail_restore
-    end = num_executed_no_fail+num_fail_execution+num_fail_restore+num_fail_prepare
+    begin = num_executed_no_fail + num_fail_execution + num_fail_restore
+    end = (
+        num_executed_no_fail + num_fail_execution + num_fail_restore + num_fail_prepare
+    )
     # Tasks that failed during the prepare phase
-    spread_logs.append({'type': 'result',
-                        'result_type': 'Failed',
-                        'level': 'task',
-                        'stage': 'prepare',
-                        'detail': {'lines': ["- %s\n" % param for param in exec_param[begin:end]]}})
+    spread_logs.append(
+        {
+            "type": "result",
+            "result_type": "Failed",
+            "level": "task",
+            "stage": "prepare",
+            "detail": {"lines": ["- %s\n" % param for param in exec_param[begin:end]]},
+        }
+    )

     return set(exec_param), spread_logs

@@ -66,69 +100,84 @@ def __init__(self, *args, **kwargs):
             num_fail_execution=10,
             num_fail_restore=10,
             num_fail_prepare=10,
-            num_not_executed=10)
-        self.exec_param_mixed = 'tests/...'
+            num_not_executed=10,
+        )
+        self.exec_param_mixed = "tests/..."

         self.filtered_exec_param_no_failed, self.spread_logs_no_failed = create_data(
             num_executed_no_fail=10,
             num_fail_execution=0,
             num_fail_restore=0,
             num_fail_prepare=0,
-            num_not_executed=0)
-        self.exec_param_no_failed = 'tests/...'
+            num_not_executed=0,
+        )
+        self.exec_param_no_failed = "tests/..."

         self.filtered_exec_param_no_exec, self.spread_logs_no_exec = create_data(
             num_executed_no_fail=0,
             num_fail_execution=0,
             num_fail_restore=0,
             num_fail_prepare=10,
-            num_not_executed=10)
-        self.exec_param_no_exec = 'tests/...'
-
-        self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort = create_data(
+            num_not_executed=10,
+        )
+        self.exec_param_no_exec = "tests/..."
+
+        (
+            self.filtered_exec_param_mix_success_abort,
+            self.spread_logs_mix_success_abort,
+        ) = create_data(
             num_executed_no_fail=10,
             num_fail_execution=0,
             num_fail_restore=0,
             num_fail_prepare=0,
-            num_not_executed=10)
-        self.exec_param_mix_success_abort = 'tests/...'
+            num_not_executed=10,
+        )
+        self.exec_param_mix_success_abort = "tests/..."

     # The following test group has mixed results with task results
     # of all kinds: successful, failed in all three phases, and not run

     def test_list_executed__mixed(self):
         actual = log_analyzer.list_executed_tasks(
-            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+            self.filtered_exec_param_mixed, self.spread_logs_mixed
+        )
         expected = set(["test_" + str(i) for i in range(30)])
         self.assertSetEqual(expected, actual)

     def test_list_failed__mixed(self):
         actual = log_analyzer.list_failed_tasks(
-            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+            self.filtered_exec_param_mixed, self.spread_logs_mixed
+        )
         expected = set(["test_" + str(i) for i in range(10, 20)])
         self.assertSetEqual(expected, actual)

     def test_list_successful__mixed(self):
         actual = log_analyzer.list_successful_tasks(
-            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+            self.filtered_exec_param_mixed, self.spread_logs_mixed
+        )
         expected = set(["test_" + str(i) for i in range(10)])
         self.assertSetEqual(expected, actual)

     def test_executed_and_failed__mixed(self):
         actual = log_analyzer.list_executed_and_failed(
-            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+            self.filtered_exec_param_mixed, self.spread_logs_mixed
+        )
         expected = set(["test_" + str(i) for i in range(10, 30)])
         self.assertSetEqual(expected, actual)

     def test_aborted_tasks__mixed(self):
         actual = log_analyzer.list_aborted_tasks(
-            self.filtered_exec_param_mixed, self.spread_logs_mixed)
+            self.filtered_exec_param_mixed, self.spread_logs_mixed
+        )
         expected = set(["test_" + str(i) for i in range(30, 50)])
         self.assertSetEqual(expected, actual)

     def test_reexecute_tasks__mixed(self):
         actual = log_analyzer.list_rexecute_tasks(
-            self.exec_param_mixed, self.filtered_exec_param_mixed, self.spread_logs_mixed)
+            self.exec_param_mixed,
+            self.filtered_exec_param_mixed,
+            self.spread_logs_mixed,
+        )
         expected = set(["test_" + str(i) for i in range(10, 50)])
         self.assertSetEqual(expected, actual)
@@ -136,34 +185,42 @@ def test_reexecute_tasks__mixed(self):

     def test_list_executed__no_fail(self):
         actual = log_analyzer.list_executed_tasks(
-            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed
+        )
         expected = set(["test_" + str(i) for i in range(10)])
         self.assertSetEqual(expected, actual)

     def test_list_failed__no_fail(self):
         actual = log_analyzer.list_failed_tasks(
-            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed
+        )
         self.assertEqual(0, len(actual))

     def test_list_successful__no_fail(self):
         actual = log_analyzer.list_successful_tasks(
-            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed
+        )
         expected = set(["test_" + str(i) for i in range(10)])
         self.assertSetEqual(expected, actual)

     def test_executed_and_failed__no_fail(self):
         actual = log_analyzer.list_executed_and_failed(
-            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed
+        )
         self.assertEqual(0, len(actual))

     def test_aborted_tasks__no_fail(self):
         actual = log_analyzer.list_aborted_tasks(
-            self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+            self.filtered_exec_param_no_failed, self.spread_logs_no_failed
+        )
         self.assertEqual(0, len(actual))

     def test_reexecute_tasks__no_fail(self):
         actual = log_analyzer.list_rexecute_tasks(
-            self.exec_param_no_failed, self.filtered_exec_param_no_failed, self.spread_logs_no_failed)
+            self.exec_param_no_failed,
+            self.filtered_exec_param_no_failed,
+            self.spread_logs_no_failed,
+        )
         self.assertEqual(0, len(actual))

     # The following group only has tasks that either failed
@@ -171,72 +228,93 @@ def test_reexecute_tasks__no_fail(self):

     def test_list_executed__no_exec(self):
         actual = log_analyzer.list_executed_tasks(
-            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec
+        )
         self.assertEqual(0, len(actual))

     def test_list_failed__no_exec(self):
         actual = log_analyzer.list_failed_tasks(
-            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec
+        )
         self.assertEqual(0, len(actual))

     def test_list_successful__no_exec(self):
         actual = log_analyzer.list_successful_tasks(
-            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec
+        )
         self.assertEqual(0, len(actual))

     def test_executed_and_failed__no_exec(self):
         actual = log_analyzer.list_executed_and_failed(
-            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec
+        )
         self.assertEqual(0, len(actual))

     def test_aborted_tasks__no_exec(self):
         actual = log_analyzer.list_aborted_tasks(
-            self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
+            self.filtered_exec_param_no_exec, self.spread_logs_no_exec
+        )
         expected = set(["test_" + str(i) for i in range(20)])
         self.assertSetEqual(expected, actual)

     def test_reexecute_tasks__no_exec(self):
         actual = log_analyzer.list_rexecute_tasks(
-            self.exec_param_no_exec, self.filtered_exec_param_no_exec, self.spread_logs_no_exec)
-        self.assertSetEqual(set(['tests/...']), actual)
+            self.exec_param_no_exec,
self.filtered_exec_param_no_exec,
+            self.spread_logs_no_exec,
+        )
+        self.assertSetEqual(set(["tests/..."]), actual)

     # The following test group has tasks that either
     # were successful or did not run at all
     def test_list_executed__mix_success_abort(self):
         actual = log_analyzer.list_executed_tasks(
-            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+            self.filtered_exec_param_mix_success_abort,
+            self.spread_logs_mix_success_abort,
+        )
         expected = set(["test_" + str(i) for i in range(10)])
         self.assertSetEqual(expected, actual)

     def test_list_failed__mix_success_abort(self):
         actual = log_analyzer.list_failed_tasks(
-            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+            self.filtered_exec_param_mix_success_abort,
+            self.spread_logs_mix_success_abort,
+        )
         self.assertEqual(len(actual), 0)

     def test_list_successful__mix_success_abort(self):
         actual = log_analyzer.list_successful_tasks(
-            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+            self.filtered_exec_param_mix_success_abort,
+            self.spread_logs_mix_success_abort,
+        )
         expected = set(["test_" + str(i) for i in range(10)])
         self.assertSetEqual(expected, actual)

     def test_executed_and_failed__mix_success_abort(self):
         actual = log_analyzer.list_executed_and_failed(
-            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+            self.filtered_exec_param_mix_success_abort,
+            self.spread_logs_mix_success_abort,
+        )
         self.assertEqual(len(actual), 0)

     def test_aborted_tasks__mix_success_abort(self):
         actual = log_analyzer.list_aborted_tasks(
-            self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+            self.filtered_exec_param_mix_success_abort,
+            self.spread_logs_mix_success_abort,
+        )
         expected = set(["test_" + str(i) for i in range(10, 20)])
         self.assertSetEqual(expected, actual)

     def test_reexecute_tasks__mix_success_abort(self):
         actual = log_analyzer.list_rexecute_tasks(
-            self.exec_param_mix_success_abort, self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort)
+            self.exec_param_mix_success_abort,
+            self.filtered_exec_param_mix_success_abort,
+            self.spread_logs_mix_success_abort,
+        )
         expected = set(["test_" + str(i) for i in range(10, 20)])
         self.assertSetEqual(expected, actual)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()

From 64d668c91074e661fcc829251d6243da66beb665 Mon Sep 17 00:00:00 2001
From: Zygmunt Krynicki
Date: Mon, 17 Mar 2025 13:20:58 +0100
Subject: [PATCH 4/6] tests: write more precise type for SpreadLog

This has a remaining, unfixed type bug that needs to be discussed.
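For context, the diff below introduces a union of TypedDicts discriminated
by a Literal "type" key, which checkers such as mypy can narrow per branch.
A minimal sketch of that pattern (the Phase/Result names and the describe()
helper are invented for this illustration and are not part of the patch):

    from typing import Literal, TypedDict, Union

    class Phase(TypedDict):
        type: Literal["phase"]
        task: str

    class Result(TypedDict):
        type: Literal["result"]
        result_type: str

    Log = Union[Phase, Result]

    def describe(log: Log) -> str:
        # Comparing the Literal "type" key lets the checker narrow the union
        if log["type"] == "phase":
            return log["task"]  # log is Phase here
        return log["result_type"]  # log is Result here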
Signed-off-by: Zygmunt Krynicki
---
 .../snapd-testing-tools/utils/log-analyzer |  19 ++-
 .../utils/test_log_analyzer.py             | 146 +++++++++++-------
 2 files changed, 108 insertions(+), 57 deletions(-)

diff --git a/tests/lib/external/snapd-testing-tools/utils/log-analyzer b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
index 02ea09ba697..17d35922151 100755
--- a/tests/lib/external/snapd-testing-tools/utils/log-analyzer
+++ b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
@@ -5,17 +5,28 @@
 import argparse
 import json
 import subprocess
-from typing import Callable, TypedDict
+from typing import Callable, TypedDict, Literal, Union


-class SpreadLog(TypedDict):
+class SpreadLog_TypePhase(TypedDict):
+    type: Literal["phase"]
     task: str
-    type: str
     verb: str
+
+
+class SpreadLogDetail(TypedDict):
+    lines: list[str]
+
+
+class SpreadLog_TypeResult(TypedDict):
+    type: Literal["result"]
     result_type: str
     level: str
     stage: str
-    detail: dict[str, list[str]]
+    detail: SpreadLogDetail
+
+
+SpreadLog = Union[SpreadLog_TypePhase, SpreadLog_TypeResult]


 def filter_with_spread(exec_param: str) -> list[str]:
diff --git a/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py b/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py
index 1156cb3d899..604847f4d74 100644
--- a/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py
+++ b/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py
@@ -2,6 +2,7 @@
 from importlib.machinery import SourceFileLoader
 import os
 import unittest
+from typing import Any, Tuple, TypedDict, Literal, Union

 # Since log-analyzer has a hyphen and is missing the .py extension,
 # we need to do some extra work to import the module to test
@@ -10,17 +11,44 @@
     "log-analyzer",
     SourceFileLoader("log-analyzer", os.path.join(dir_path, "log-analyzer")),
 )
+if spec is None:
+    raise RuntimeError("cannot get log-analyzer spec")
+if spec.loader is None:
+    raise RuntimeError("cannot get log-analyzer spec loader")
 log_analyzer = module_from_spec(spec)
+if log_analyzer is None:
+    raise RuntimeError("cannot create log-analyzer module")
 spec.loader.exec_module(log_analyzer)


+class SpreadLog_TypePhase(TypedDict):
+    type: Literal["phase"]
+    task: str
+    verb: str
+
+
+class SpreadLogDetail(TypedDict):
+    lines: list[str]
+
+
+class SpreadLog_TypeResult(TypedDict):
+    type: Literal["result"]
+    result_type: str
+    level: str
+    stage: str
+    detail: SpreadLogDetail
+
+
+SpreadLog = Union[SpreadLog_TypePhase, SpreadLog_TypeResult]
+
+
 def create_data(
-    num_executed_no_fail,
-    num_fail_execution,
-    num_fail_restore,
-    num_fail_prepare,
-    num_not_executed,
-):
+    num_executed_no_fail: int,
+    num_fail_execution: int,
+    num_fail_restore: int,
+    num_fail_prepare: int,
+    num_not_executed: int,
+) -> Tuple[set[str], list[SpreadLog]]:
     # The order will be:
     # 1. tasks that executed and didn't fail
     # 2.
tasks that failed during execution @@ -40,8 +68,8 @@ def create_data( ] # The tasks that executed are those that didn't fail plus those that failed during execution or restore - spread_logs = [ - {"type": "phase", "verb": "Executing", "task": param} + spread_logs: list[SpreadLog] = [ + SpreadLog_TypePhase({"type": "phase", "verb": "Executing", "task": param}) for param in exec_param[ : num_executed_no_fail + num_fail_execution + num_fail_restore ] @@ -51,25 +79,33 @@ def create_data( end = num_executed_no_fail + num_fail_execution # The tasks that failed are those that failed during execution, not during restore or prepare spread_logs.append( - { - "type": "result", - "result_type": "Failed", - "level": "tasks", - "detail": {"lines": ["- %s\n" % param for param in exec_param[begin:end]]}, - } + SpreadLog_TypeResult( + { + "type": "result", + "result_type": "Failed", + "level": "tasks", + "detail": { + "lines": ["- %s\n" % param for param in exec_param[begin:end]] + }, + } + ) ) begin = num_executed_no_fail + num_fail_execution end = num_executed_no_fail + num_fail_execution + num_fail_restore # Tasks that failed during the restore phase spread_logs.append( - { - "type": "result", - "result_type": "Failed", - "level": "task", - "stage": "restore", - "detail": {"lines": ["- %s\n" % param for param in exec_param[begin:end]]}, - } + SpreadLog_TypeResult( + { + "type": "result", + "result_type": "Failed", + "level": "task", + "stage": "restore", + "detail": { + "lines": ["- %s\n" % param for param in exec_param[begin:end]] + }, + } + ) ) begin = num_executed_no_fail + num_fail_execution + num_fail_restore @@ -78,13 +114,17 @@ def create_data( ) # Tasks that failed during the prepare phase spread_logs.append( - { - "type": "result", - "result_type": "Failed", - "level": "task", - "stage": "prepare", - "detail": {"lines": ["- %s\n" % param for param in exec_param[begin:end]]}, - } + SpreadLog_TypeResult( + { + "type": "result", + "result_type": "Failed", + "level": "task", + "stage": "prepare", + "detail": { + "lines": ["- %s\n" % param for param in exec_param[begin:end]] + }, + } + ) ) return set(exec_param), spread_logs @@ -92,7 +132,7 @@ def create_data( class TestLogAnalyzer(unittest.TestCase): - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any) -> None: super(TestLogAnalyzer, self).__init__(*args, **kwargs) self.filtered_exec_param_mixed, self.spread_logs_mixed = create_data( @@ -137,42 +177,42 @@ def __init__(self, *args, **kwargs): # The following test group has mixed results with task results # of all kinds: successful, failed in all three phases, and not run - def test_list_executed__mixed(self): + def test_list_executed__mixed(self) -> None: actual = log_analyzer.list_executed_tasks( self.filtered_exec_param_mixed, self.spread_logs_mixed ) expected = set(["test_" + str(i) for i in range(30)]) self.assertSetEqual(expected, actual) - def test_list_failed__mixed(self): + def test_list_failed__mixed(self) -> None: actual = log_analyzer.list_failed_tasks( self.filtered_exec_param_mixed, self.spread_logs_mixed ) expected = set(["test_" + str(i) for i in range(10, 20)]) self.assertSetEqual(expected, actual) - def test_list_successful__mixed(self): + def test_list_successful__mixed(self) -> None: actual = log_analyzer.list_successful_tasks( self.filtered_exec_param_mixed, self.spread_logs_mixed ) expected = set(["test_" + str(i) for i in range(10)]) self.assertSetEqual(expected, actual) - def test_executed_and_failed__mixed(self): + def 
test_executed_and_failed__mixed(self) -> None: actual = log_analyzer.list_executed_and_failed( self.filtered_exec_param_mixed, self.spread_logs_mixed ) expected = set(["test_" + str(i) for i in range(10, 30)]) self.assertSetEqual(expected, actual) - def test_aborted_tasks__mixed(self): + def test_aborted_tasks__mixed(self) -> None: actual = log_analyzer.list_aborted_tasks( self.filtered_exec_param_mixed, self.spread_logs_mixed ) expected = set(["test_" + str(i) for i in range(30, 50)]) self.assertSetEqual(expected, actual) - def test_reexecute_tasks__mixed(self): + def test_reexecute_tasks__mixed(self) -> None: actual = log_analyzer.list_rexecute_tasks( self.exec_param_mixed, self.filtered_exec_param_mixed, @@ -183,39 +223,39 @@ def test_reexecute_tasks__mixed(self): # The following test group has only tasks that were successfully run - def test_list_executed__no_fail(self): + def test_list_executed__no_fail(self) -> None: actual = log_analyzer.list_executed_tasks( self.filtered_exec_param_no_failed, self.spread_logs_no_failed ) expected = set(["test_" + str(i) for i in range(10)]) self.assertSetEqual(expected, actual) - def test_list_failed__no_fail(self): + def test_list_failed__no_fail(self) -> None: actual = log_analyzer.list_failed_tasks( self.filtered_exec_param_no_failed, self.spread_logs_no_failed ) self.assertEqual(0, len(actual)) - def test_list_successful__no_fail(self): + def test_list_successful__no_fail(self) -> None: actual = log_analyzer.list_successful_tasks( self.filtered_exec_param_no_failed, self.spread_logs_no_failed ) expected = set(["test_" + str(i) for i in range(10)]) self.assertSetEqual(expected, actual) - def test_executed_and_failed__no_fail(self): + def test_executed_and_failed__no_fail(self) -> None: actual = log_analyzer.list_executed_and_failed( self.filtered_exec_param_no_failed, self.spread_logs_no_failed ) self.assertEqual(0, len(actual)) - def test_aborted_tasks__no_fail(self): + def test_aborted_tasks__no_fail(self) -> None: actual = log_analyzer.list_aborted_tasks( self.filtered_exec_param_no_failed, self.spread_logs_no_failed ) self.assertEqual(0, len(actual)) - def test_reexecute_tasks__no_fail(self): + def test_reexecute_tasks__no_fail(self) -> None: actual = log_analyzer.list_rexecute_tasks( self.exec_param_no_failed, self.filtered_exec_param_no_failed, @@ -226,38 +266,38 @@ def test_reexecute_tasks__no_fail(self): # The following group only has tasks that either failed # during the prepare phase or were not run at all - def test_list_executed__no_exec(self): + def test_list_executed__no_exec(self) -> None: actual = log_analyzer.list_executed_tasks( self.filtered_exec_param_no_exec, self.spread_logs_no_exec ) self.assertEqual(0, len(actual)) - def test_list_failed__no_exec(self): + def test_list_failed__no_exec(self) -> None: actual = log_analyzer.list_failed_tasks( self.filtered_exec_param_no_exec, self.spread_logs_no_exec ) self.assertEqual(0, len(actual)) - def test_list_successful__no_exec(self): + def test_list_successful__no_exec(self) -> None: actual = log_analyzer.list_successful_tasks( self.filtered_exec_param_no_exec, self.spread_logs_no_exec ) self.assertEqual(0, len(actual)) - def test_executed_and_failed__no_exec(self): + def test_executed_and_failed__no_exec(self) -> None: actual = log_analyzer.list_executed_and_failed( self.filtered_exec_param_no_exec, self.spread_logs_no_exec ) self.assertEqual(0, len(actual)) - def test_aborted_tasks__no_exec(self): + def test_aborted_tasks__no_exec(self) -> None: actual = 
log_analyzer.list_aborted_tasks( self.filtered_exec_param_no_exec, self.spread_logs_no_exec ) expected = set(["test_" + str(i) for i in range(20)]) self.assertSetEqual(expected, actual) - def test_reexecute_tasks__no_exec(self): + def test_reexecute_tasks__no_exec(self) -> None: actual = log_analyzer.list_rexecute_tasks( self.exec_param_no_exec, self.filtered_exec_param_no_exec, @@ -268,7 +308,7 @@ def test_reexecute_tasks__no_exec(self): # The following test group has tasks that either # were successful or did not run at all - def test_list_executed__mix_success_abort(self): + def test_list_executed__mix_success_abort(self) -> None: actual = log_analyzer.list_executed_tasks( self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort, @@ -276,14 +316,14 @@ def test_list_executed__mix_success_abort(self): expected = set(["test_" + str(i) for i in range(10)]) self.assertSetEqual(expected, actual) - def test_list_failed__mix_success_abort(self): + def test_list_failed__mix_success_abort(self) -> None: actual = log_analyzer.list_failed_tasks( self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort, ) self.assertEqual(len(actual), 0) - def test_list_successful__mix_success_abort(self): + def test_list_successful__mix_success_abort(self) -> None: actual = log_analyzer.list_successful_tasks( self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort, @@ -291,14 +331,14 @@ def test_list_successful__mix_success_abort(self): expected = set(["test_" + str(i) for i in range(10)]) self.assertSetEqual(expected, actual) - def test_executed_and_failed__mix_success_abort(self): + def test_executed_and_failed__mix_success_abort(self) -> None: actual = log_analyzer.list_executed_and_failed( self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort, ) self.assertEqual(len(actual), 0) - def test_aborted_tasks__mix_success_abort(self): + def test_aborted_tasks__mix_success_abort(self) -> None: actual = log_analyzer.list_aborted_tasks( self.filtered_exec_param_mix_success_abort, self.spread_logs_mix_success_abort, @@ -306,7 +346,7 @@ def test_aborted_tasks__mix_success_abort(self): expected = set(["test_" + str(i) for i in range(10, 20)]) self.assertSetEqual(expected, actual) - def test_reexecute_tasks__mix_success_abort(self): + def test_reexecute_tasks__mix_success_abort(self) -> None: actual = log_analyzer.list_rexecute_tasks( self.exec_param_mix_success_abort, self.filtered_exec_param_mix_success_abort, From b74b6ff6fc937f2be805758f406bf3dbb550785e Mon Sep 17 00:00:00 2001 From: katie Date: Tue, 18 Mar 2025 12:02:24 +0100 Subject: [PATCH 5/6] tests: minor changes and add unit tests for main --- .../tests/log-analyzer/task.yaml | 3 +- .../snapd-testing-tools/utils/log-analyzer | 57 +++++---- .../utils/test_log_analyzer.py | 113 ++++++++++++++++-- 3 files changed, 139 insertions(+), 34 deletions(-) diff --git a/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml b/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml index 8efc4897b15..ad851a4f6e1 100644 --- a/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml +++ b/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml @@ -242,7 +242,6 @@ execute: | ### CHECK ALL ### compare_tasks_count "$(log-analyzer list-all-tasks google:)" 10 - compare_tasks_count "$(log-analyzer list-all-tasks 'google:ubuntu-20.04-64: google:ubuntu-22.04-64:')" 10 - compare_tasks_count "$(log-analyzer list-all-tasks 
'google:ubuntu-20.04-64:,google:ubuntu-22.04-64:')" 10
+    compare_tasks_count "$(log-analyzer list-all-tasks google:ubuntu-20.04-64: google:ubuntu-22.04-64:)" 10

     compare_tasks_count "$(log-analyzer list-all-tasks google:ubuntu-20.04-64:)" 5
     compare_tasks_count "$(log-analyzer list-all-tasks google:ubuntu-22.04-64:)" 5
diff --git a/tests/lib/external/snapd-testing-tools/utils/log-analyzer b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
index 17d35922151..8665a39bb4c 100755
--- a/tests/lib/external/snapd-testing-tools/utils/log-analyzer
+++ b/tests/lib/external/snapd-testing-tools/utils/log-analyzer
@@ -8,20 +8,21 @@ import subprocess
 from typing import Callable, TypedDict, Literal, Union


+class SpreadLogDetail(TypedDict):
+    lines: list[str]
+
+
 class SpreadLog_TypePhase(TypedDict):
     type: Literal["phase"]
     task: str
-    verb: str
-
-
-class SpreadLogDetail(TypedDict):
-    lines: list[str]
+    verb: str  # log_helper.ExecutionPhase
+    detail: SpreadLogDetail


 class SpreadLog_TypeResult(TypedDict):
     type: Literal["result"]
-    result_type: str
-    level: str
+    result_type: str  # log_helper.Result
+    level: str  # log_helper.ExecutionLevel
     stage: str
     detail: SpreadLogDetail

@@ -29,11 +30,10 @@
 SpreadLog = Union[SpreadLog_TypePhase, SpreadLog_TypeResult]


-def filter_with_spread(exec_param: str) -> list[str]:
-    exec_param = exec_param.replace(",", " ")
+def filter_with_spread(exec_param: list[str]) -> list[str]:
     cmd = ["spread", "-list"]
-    cmd.extend(exec_param.split())
-    return subprocess.check_output(cmd, universal_newlines=True).splitlines()
+    cmd.extend(exec_param)
+    return subprocess.check_output(cmd, text=True).splitlines()


 def list_executed_tasks(
@@ -50,15 +50,12 @@ def list_executed_tasks(
 def _get_detail_lines(
     spread_logs: list[SpreadLog], log_condition_func: Callable[[SpreadLog], bool]
 ) -> list[str]:
-    result = [log["detail"]["lines"] for log in spread_logs if log_condition_func(log)]
+    result = [log["detail"]["lines"]
+              for log in spread_logs if log_condition_func(log)]

-    def clean_entry(entry: str) -> str:
-        entry = entry.strip()
-        if entry.startswith("-"):
-            entry = entry[1:]
-        return entry.strip()
-
-    return [clean_entry(log) for sublist in result for log in sublist]
+    # Each entry in ['detail']['lines'] is a spread task prefaced with a '-' and
+    # surrounded by whitespace
+    return [log.strip().removeprefix('-').strip() for sublist in result for log in sublist]


 def list_failed_tasks(
@@ -102,7 +99,8 @@ def list_aborted_tasks(
 ) -> set[str]:
     executed_tasks = list_executed_tasks(filtered_exec_param, spread_logs)
     if len(executed_tasks) == 0:
-        exec_and_failed = list_executed_and_failed(filtered_exec_param, spread_logs)
+        exec_and_failed = list_executed_and_failed(
+            filtered_exec_param, spread_logs)
         return filtered_exec_param.difference(exec_and_failed)
     return filtered_exec_param.difference(executed_tasks)

@@ -123,17 +121,19 @@ def list_rexecute_tasks(
     exec_param: str, filtered_exec_param: set[str], spread_logs: list[SpreadLog]
 ) -> set[str]:
     aborted_tasks = list_aborted_tasks(filtered_exec_param, spread_logs)
-    exec_and_failed = list_executed_and_failed(filtered_exec_param, spread_logs)
+    exec_and_failed = list_executed_and_failed(
+        filtered_exec_param, spread_logs)
     exec_and_failed = exec_and_failed.intersection(filtered_exec_param)
     union = aborted_tasks.union(exec_and_failed)
     if len(filtered_exec_param.difference(union)) == 0:
-        return set(exec_param.split())
+        return set(exec_param)
     return union


 def add_arguments(parser: argparse.ArgumentParser)
-> None: parser.add_argument( "exec_params", + nargs='+', help="This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)", ) parser.add_argument( @@ -143,7 +143,7 @@ def add_arguments(parser: argparse.ArgumentParser) -> None: ) -if __name__ == "__main__": +def main() -> None: parser = argparse.ArgumentParser( description=""" Usage: log-analyzer list-failed-tasks @@ -183,7 +183,8 @@ if __name__ == "__main__": "list-reexecute-tasks", help="list the tasks to re-execute to complete (includes aborted and failed tasks)", ) - list_all = subparsers.add_parser("list-all-tasks", help="list all the tasks") + list_all = subparsers.add_parser( + "list-all-tasks", help="list all the tasks") add_arguments(failed) add_arguments(executed) @@ -192,6 +193,7 @@ if __name__ == "__main__": add_arguments(reexecute) list_all.add_argument( "exec_params", + nargs='+', help="This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)", ) @@ -215,6 +217,11 @@ if __name__ == "__main__": elif args.command == "list-all-tasks": print(" ".join(filtered_exec_param)) elif args.command == "list-reexecute-tasks": - print(" ".join(list_rexecute_tasks(args.exec_params, filtered_exec_param, log))) + print(" ".join(list_rexecute_tasks( + args.exec_params, filtered_exec_param, log))) else: raise RuntimeError("log.analyzer: no such command: %s" % args.command) + + +if __name__ == "__main__": + main() diff --git a/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py b/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py index 604847f4d74..2ad6745d846 100644 --- a/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py +++ b/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py @@ -1,8 +1,12 @@ +import argparse +import json from importlib.util import spec_from_loader, module_from_spec from importlib.machinery import SourceFileLoader +from io import StringIO import os -import unittest from typing import Any, Tuple, TypedDict, Literal, Union +import unittest +from unittest.mock import Mock, patch # Since log-analyzer has a hyphen and is missing the .py extension, # we need to do some extra work to import the module to test @@ -69,7 +73,8 @@ def create_data( # The tasks that executed are those that didn't fail plus those that failed during execution or restore spread_logs: list[SpreadLog] = [ - SpreadLog_TypePhase({"type": "phase", "verb": "Executing", "task": param}) + SpreadLog_TypePhase( + {"type": "phase", "verb": "Executing", "task": param}) for param in exec_param[ : num_executed_no_fail + num_fail_execution + num_fail_restore ] @@ -84,6 +89,7 @@ def create_data( "type": "result", "result_type": "Failed", "level": "tasks", + "stage": "", "detail": { "lines": ["- %s\n" % param for param in exec_param[begin:end]] }, @@ -142,7 +148,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: num_fail_prepare=10, num_not_executed=10, ) - self.exec_param_mixed = "tests/..." + self.exec_param_mixed = ["tests/...", "other-tests/..."] self.filtered_exec_param_no_failed, self.spread_logs_no_failed = create_data( num_executed_no_fail=10, @@ -151,7 +157,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: num_fail_prepare=0, num_not_executed=0, ) - self.exec_param_no_failed = "tests/..." 
+ self.exec_param_no_failed = ["tests/...", "other-tests/..."] self.filtered_exec_param_no_exec, self.spread_logs_no_exec = create_data( num_executed_no_fail=0, @@ -160,7 +166,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: num_fail_prepare=10, num_not_executed=10, ) - self.exec_param_no_exec = "tests/..." + self.exec_param_no_exec = ["tests/...", "other-tests/..."] ( self.filtered_exec_param_mix_success_abort, @@ -172,7 +178,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: num_fail_prepare=0, num_not_executed=10, ) - self.exec_param_mix_success_abort = "tests/..." + self.exec_param_mix_success_abort = ["tests/...", "other-tests/..."] # The following test group has mixed results with task results # of all kinds: successful, failed in all three phases, and not run @@ -303,7 +309,7 @@ def test_reexecute_tasks__no_exec(self) -> None: self.filtered_exec_param_no_exec, self.spread_logs_no_exec, ) - self.assertSetEqual(set(["tests/..."]), actual) + self.assertSetEqual(set(self.exec_param_no_exec), actual) # The following test group has tasks that either # were successful or did not run at all @@ -355,6 +361,99 @@ def test_reexecute_tasks__mix_success_abort(self) -> None: expected = set(["test_" + str(i) for i in range(10, 20)]) self.assertSetEqual(expected, actual) + # The following test group checks the main function with + # mixed results (some failures, some aborts, some successes) + + @patch('argparse.ArgumentParser.parse_args') + def test_list_executed__main(self, parse_args_mock: Mock) -> None: + log_analyzer.filter_with_spread = Mock() + log_analyzer.filter_with_spread.return_value = [ + "test_" + str(i) for i in range(50)] + parse_args_mock.return_value = argparse.Namespace( + command='list-executed-tasks', + exec_params=self.exec_param_mixed, + parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) + ) + with patch('sys.stdout', new=StringIO()) as stdout_patch: + log_analyzer.main() + expected = set(["test_" + str(i) for i in range(30)]) + self.assertSetEqual(expected, set(stdout_patch.getvalue().split())) + + @patch('argparse.ArgumentParser.parse_args') + def test_list_failed__main(self, parse_args_mock: Mock) -> None: + log_analyzer.filter_with_spread = Mock() + log_analyzer.filter_with_spread.return_value = [ + "test_" + str(i) for i in range(50)] + parse_args_mock.return_value = argparse.Namespace( + command='list-failed-tasks', + exec_params=self.exec_param_mixed, + parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) + ) + with patch('sys.stdout', new=StringIO()) as stdout_patch: + log_analyzer.main() + expected = set(["test_" + str(i) for i in range(10, 20)]) + self.assertSetEqual(expected, set(stdout_patch.getvalue().split())) + + @patch('argparse.ArgumentParser.parse_args') + def test_list_successful__main(self, parse_args_mock: Mock) -> None: + log_analyzer.filter_with_spread = Mock() + log_analyzer.filter_with_spread.return_value = [ + "test_" + str(i) for i in range(50)] + parse_args_mock.return_value = argparse.Namespace( + command='list-successful-tasks', + exec_params=self.exec_param_mixed, + parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) + ) + with patch('sys.stdout', new=StringIO()) as stdout_patch: + log_analyzer.main() + expected = set(["test_" + str(i) for i in range(10)]) + self.assertSetEqual(expected, set(stdout_patch.getvalue().split())) + + @patch('argparse.ArgumentParser.parse_args') + def test_aborted_tasks__main(self, parse_args_mock: Mock) -> None: + log_analyzer.filter_with_spread = Mock() + 
log_analyzer.filter_with_spread.return_value = [ + "test_" + str(i) for i in range(50)] + parse_args_mock.return_value = argparse.Namespace( + command='list-aborted-tasks', + exec_params=self.exec_param_mixed, + parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) + ) + with patch('sys.stdout', new=StringIO()) as stdout_patch: + log_analyzer.main() + expected = set(["test_" + str(i) for i in range(30, 50)]) + self.assertSetEqual(expected, set(stdout_patch.getvalue().split())) + + @patch('argparse.ArgumentParser.parse_args') + def test_reexecute_tasks__main(self, parse_args_mock: Mock) -> None: + log_analyzer.filter_with_spread = Mock() + log_analyzer.filter_with_spread.return_value = [ + "test_" + str(i) for i in range(50)] + parse_args_mock.return_value = argparse.Namespace( + command='list-reexecute-tasks', + exec_params=self.exec_param_mixed, + parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) + ) + with patch('sys.stdout', new=StringIO()) as stdout_patch: + log_analyzer.main() + expected = set(["test_" + str(i) for i in range(10, 50)]) + self.assertSetEqual(expected, set(stdout_patch.getvalue().split())) + + @patch('argparse.ArgumentParser.parse_args') + def test_reexecute_tasks__main_no_exec(self, parse_args_mock: Mock) -> None: + log_analyzer.filter_with_spread = Mock() + log_analyzer.filter_with_spread.return_value = [ + "test_" + str(i) for i in range(50)] + parse_args_mock.return_value = argparse.Namespace( + command='list-reexecute-tasks', + exec_params=self.exec_param_no_exec, + parsed_log=StringIO(json.dumps(self.spread_logs_no_exec)) + ) + with patch('sys.stdout', new=StringIO()) as stdout_patch: + log_analyzer.main() + expected = set(self.exec_param_no_exec) + self.assertSetEqual(expected, set(stdout_patch.getvalue().split())) + if __name__ == "__main__": unittest.main() From 7625dc9b6450543627307b19747cb05ec0f6abb2 Mon Sep 17 00:00:00 2001 From: katie Date: Thu, 20 Mar 2025 14:51:15 +0100 Subject: [PATCH 6/6] tests: use string input instead of nargs for exec_params to allow for quoted run tests --- .../snapd-testing-tools/tests/log-analyzer/task.yaml | 3 ++- .../external/snapd-testing-tools/utils/log-analyzer | 7 +++---- .../snapd-testing-tools/utils/test_log_analyzer.py | 12 ++++++------ 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml b/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml index ad851a4f6e1..8efc4897b15 100644 --- a/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml +++ b/tests/lib/external/snapd-testing-tools/tests/log-analyzer/task.yaml @@ -242,6 +242,7 @@ execute: | ### CHECK ALL ### compare_tasks_count "$(log-analyzer list-all-tasks google:)" 10 - compare_tasks_count "$(log-analyzer list-all-tasks google:ubuntu-20.04-64: google:ubuntu-22.04-64:)" 10 + compare_tasks_count "$(log-analyzer list-all-tasks 'google:ubuntu-20.04-64: google:ubuntu-22.04-64:')" 10 + compare_tasks_count "$(log-analyzer list-all-tasks 'google:ubuntu-20.04-64:,google:ubuntu-22.04-64:')" 10 compare_tasks_count "$(log-analyzer list-all-tasks google:ubuntu-20.04-64:)" 5 compare_tasks_count "$(log-analyzer list-all-tasks google:ubuntu-22.04-64:)" 5 diff --git a/tests/lib/external/snapd-testing-tools/utils/log-analyzer b/tests/lib/external/snapd-testing-tools/utils/log-analyzer index 8665a39bb4c..9a98f83420f 100755 --- a/tests/lib/external/snapd-testing-tools/utils/log-analyzer +++ b/tests/lib/external/snapd-testing-tools/utils/log-analyzer @@ -133,7 +133,6 @@ def 
list_rexecute_tasks( def add_arguments(parser: argparse.ArgumentParser) -> None: parser.add_argument( "exec_params", - nargs='+', help="This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)", ) parser.add_argument( @@ -193,13 +192,13 @@ def main() -> None: add_arguments(reexecute) list_all.add_argument( "exec_params", - nargs='+', help="This is the parameter used to run spread (something like this BACKEND:SYSTEM:SUITE)", ) args = parser.parse_args() - filtered_exec_param = set(filter_with_spread(args.exec_params)) + exec_params = args.exec_params.replace(",", " ").split() + filtered_exec_param = set(filter_with_spread(exec_params)) if hasattr(args, "parsed_log"): log = json.load(args.parsed_log) @@ -218,7 +217,7 @@ def main() -> None: print(" ".join(filtered_exec_param)) elif args.command == "list-reexecute-tasks": print(" ".join(list_rexecute_tasks( - args.exec_params, filtered_exec_param, log))) + exec_params, filtered_exec_param, log))) else: raise RuntimeError("log.analyzer: no such command: %s" % args.command) diff --git a/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py b/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py index 2ad6745d846..e936ef541e0 100644 --- a/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py +++ b/tests/lib/external/snapd-testing-tools/utils/test_log_analyzer.py @@ -371,7 +371,7 @@ def test_list_executed__main(self, parse_args_mock: Mock) -> None: "test_" + str(i) for i in range(50)] parse_args_mock.return_value = argparse.Namespace( command='list-executed-tasks', - exec_params=self.exec_param_mixed, + exec_params=' '.join(self.exec_param_mixed), parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) ) with patch('sys.stdout', new=StringIO()) as stdout_patch: @@ -386,7 +386,7 @@ def test_list_failed__main(self, parse_args_mock: Mock) -> None: "test_" + str(i) for i in range(50)] parse_args_mock.return_value = argparse.Namespace( command='list-failed-tasks', - exec_params=self.exec_param_mixed, + exec_params=' '.join(self.exec_param_mixed), parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) ) with patch('sys.stdout', new=StringIO()) as stdout_patch: @@ -401,7 +401,7 @@ def test_list_successful__main(self, parse_args_mock: Mock) -> None: "test_" + str(i) for i in range(50)] parse_args_mock.return_value = argparse.Namespace( command='list-successful-tasks', - exec_params=self.exec_param_mixed, + exec_params=' '.join(self.exec_param_mixed), parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) ) with patch('sys.stdout', new=StringIO()) as stdout_patch: @@ -416,7 +416,7 @@ def test_aborted_tasks__main(self, parse_args_mock: Mock) -> None: "test_" + str(i) for i in range(50)] parse_args_mock.return_value = argparse.Namespace( command='list-aborted-tasks', - exec_params=self.exec_param_mixed, + exec_params=' '.join(self.exec_param_mixed), parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) ) with patch('sys.stdout', new=StringIO()) as stdout_patch: @@ -431,7 +431,7 @@ def test_reexecute_tasks__main(self, parse_args_mock: Mock) -> None: "test_" + str(i) for i in range(50)] parse_args_mock.return_value = argparse.Namespace( command='list-reexecute-tasks', - exec_params=self.exec_param_mixed, + exec_params=' '.join(self.exec_param_mixed), parsed_log=StringIO(json.dumps(self.spread_logs_mixed)) ) with patch('sys.stdout', new=StringIO()) as stdout_patch: @@ -446,7 +446,7 @@ def test_reexecute_tasks__main_no_exec(self, parse_args_mock: Mock) -> None: "test_" + str(i) for i in 
range(50)] parse_args_mock.return_value = argparse.Namespace( command='list-reexecute-tasks', - exec_params=self.exec_param_no_exec, + exec_params=' '.join(self.exec_param_no_exec), parsed_log=StringIO(json.dumps(self.spread_logs_no_exec)) ) with patch('sys.stdout', new=StringIO()) as stdout_patch: