From 4d0258177ec9443029258822506f98cf3bbe451e Mon Sep 17 00:00:00 2001 From: katie Date: Mon, 24 Feb 2025 14:18:54 +0100 Subject: [PATCH 1/9] tests: add feature-tagging workflow --- .github/workflows/feature-tagging.yaml | 80 +++++++++++++++ .github/workflows/spread-tests.yaml | 29 +++++- tests/lib/compose-features.py | 132 +++++++++++++++++++++++++ 3 files changed, 240 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/feature-tagging.yaml create mode 100755 tests/lib/compose-features.py diff --git a/.github/workflows/feature-tagging.yaml b/.github/workflows/feature-tagging.yaml new file mode 100644 index 00000000000..78acf559613 --- /dev/null +++ b/.github/workflows/feature-tagging.yaml @@ -0,0 +1,80 @@ +name: Feature Tagging + +on: + workflow_dispatch: + inputs: + features: + type: string + description: 'Comma-separated list of features to tag' + default: 'all' + +jobs: + read-systems: + runs-on: ubuntu-latest + outputs: + fundamental-systems: ${{ steps.read-systems.outputs.fundamental-systems }} + non-fundamental-systems: ${{ steps.read-systems.outputs.non-fundamental-systems }} + nested-systems: ${{ steps.read-systems.outputs.nested-systems }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Read matrix file + id: read-systems + shell: bash + run: | + echo "fundamental-systems=$(jq -c . ./.github/workflows/fundamental-systems.json)" >> $GITHUB_OUTPUT + echo "non-fundamental-systems=$(jq -c . ./.github/workflows/non-fundamental-systems.json)" >> $GITHUB_OUTPUT + echo "nested-systems=$(jq -c . ./.github/workflows/nested-systems.json)" >> $GITHUB_OUTPUT + + tag-features-fundamental: + uses: ./.github/workflows/spread-tests.yaml + needs: [read-systems] + name: "spread ${{ matrix.group }}" + with: + runs-on: '["self-hosted", "spread-enabled"]' + group: ${{ matrix.group }} + backend: ${{ matrix.backend }} + systems: ${{ matrix.systems }} + tasks: ${{ matrix.tasks }} + rules: ${{ matrix.rules }} + is-fundamental: true + use-snapd-snap-from-master: true + spread-tag-features: ${{ inputs.features }} + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.read-systems.outputs.fundamental-systems) }} + + tag-features-non-fundamental: + uses: ./.github/workflows/spread-tests.yaml + needs: [read-systems] + name: "spread ${{ matrix.group }}" + with: + runs-on: '["self-hosted", "spread-enabled"]' + group: ${{ matrix.group }} + backend: ${{ matrix.backend }} + systems: ${{ matrix.systems }} + tasks: ${{ matrix.tasks }} + rules: ${{ matrix.rules }} + use-snapd-snap-from-master: true + spread-tag-features: ${{ inputs.features }} + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.read-systems.outputs.non-fundamental-systems) }} + + tag-features-nested: + uses: ./.github/workflows/spread-tests.yaml + needs: [read-systems] + name: "spread ${{ matrix.group }}" + with: + runs-on: '["self-hosted", "spread-enabled"]' + group: ${{ matrix.group }} + backend: ${{ matrix.backend }} + systems: ${{ matrix.systems }} + tasks: ${{ matrix.tasks }} + rules: ${{ matrix.rules }} + use-snapd-snap-from-master: true + spread-tag-features: ${{ inputs.features }} + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.read-systems.outputs.nested-systems) }} \ No newline at end of file diff --git a/.github/workflows/spread-tests.yaml b/.github/workflows/spread-tests.yaml index ac294396540..059eac118fc 100644 --- a/.github/workflows/spread-tests.yaml +++ b/.github/workflows/spread-tests.yaml @@ -47,12 +47,17 @@ on: description: 'Comma-separated list of experimental snapd 
features to enable with: snap set system "experimental.<feature>=true"'
         required: false
         type: string
+      spread-tag-features:
+        description: 'If specified, will tag the spread results with the specified features (comma-separated)'
+        required: false
+        type: string
 
 jobs:
   run-spread:
     env:
       SPREAD_EXPERIMENTAL_FEATURES: ${{ inputs.spread-experimental-features }}
+      SPREAD_TAG_FEATURES: ${{ inputs.spread-tag-features }}
     runs-on: ${{ fromJSON(inputs.runs-on) }}
     steps:
@@ -306,6 +311,12 @@ jobs:
             exit 0
           fi
 
+          SPREAD_FLAGS='-no-debug-output -logs spread-logs'
+          if [ -n "$SPREAD_TAG_FEATURES" ]; then
+              SPREAD_FLAGS="$SPREAD_FLAGS -artifacts spread-artifacts"
+              echo "ARTIFACTS_FOLDER=spread-artifacts" >> $GITHUB_ENV
+          fi
+
           # Run spread tests
           # "pipefail" ensures that a non-zero status from the spread is
           # propagated; and we use a subshell as this option could trigger
@@ -313,7 +324,7 @@ jobs:
           echo "Running command: $SPREAD $RUN_TESTS"
           (
             set -o pipefail
-            $SPREAD -no-debug-output -logs spread-logs $RUN_TESTS | \
+            $SPREAD $SPREAD_FLAGS $RUN_TESTS | \
             ./tests/lib/external/snapd-testing-tools/utils/log-filter $FILTER_PARAMS | \
             tee spread.log
           )
@@ -378,6 +389,22 @@ jobs:
             echo "TEST_FAILED=true" >> $GITHUB_ENV
           fi
 
+      - name: Analyze feature tags
+        if: always() && env.SPREAD_TAG_FEATURES != ''
+        run: |
+          ./tests/lib/compose-features.py \
+              --dir "${ARTIFACTS_FOLDER}/feature-tags" \
+              --output "feature-tags" \
+              --failed-tests "$(cat $FAILED_TESTS_FILE)" \
+              --env-variables "SPREAD_EXPERIMENTAL_FEATURES=${SPREAD_EXPERIMENTAL_FEATURES},SPREAD_SNAPD_DEB_FROM_REPO=${SPREAD_SNAPD_DEB_FROM_REPO}"
+
+      - name: Upload feature tags
+        if: always() && env.SPREAD_TAG_FEATURES != ''
+        uses: actions/upload-artifact@v4
+        with:
+          name: "feature-tags-${{ inputs.group }}-${{ inputs.systems }}"
+          path: "feature-tags"
+
       - name: Save spread test results to cache
         if: always()
         uses: actions/cache/save@v4
diff --git a/tests/lib/compose-features.py b/tests/lib/compose-features.py
new file mode 100755
index 00000000000..3635bf2220a
--- /dev/null
+++ b/tests/lib/compose-features.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+
+import argparse
+import json
+import os
+
+
+def _parse_file_name(file_name: str) -> tuple[str, str, str, str]:
+    '''
+    Given a file name in the format with backslashes <backend>:<system>:suite\\path\\test:variant,
+    it returns the original name, the suite name, the test name and the variant name.
+    So in the example, it returns:
+    - original_name = <backend>:<system>:suite/path/test:variant
+    - suite_name = suite/path
+    - test_name = test
+    - variant_name = variant
+
+    :param file_name: The file name to parse
+    :returns: A tuple with the original name, the suite name, the test name and the variant name. If variant is not present, it returns None.
+    '''
+    original_name = file_name.replace('\\', '/')
+    task = ":".join(original_name.split(':')[2:])
+    suite_name = "/".join(task.split('/')[:-1])
+    test_name = task.split('/')[-1]
+    variant_name = None
+    if test_name.count(':') == 1:
+        variant_name = test_name.split(':')[1]
+        test_name = test_name.split(':')[0]
+    return original_name, suite_name, test_name, variant_name
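+
+# A quick illustration of the parsing above (hypothetical file name; any
+# <backend>:<system> prefix is handled the same way):
+#
+#   _parse_file_name('google:ubuntu-22.04-64:tests\\main\\snap-install:modern')
+#   -> ('google:ubuntu-22.04-64:tests/main/snap-install:modern',
+#       'tests/main', 'snap-install', 'modern')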
+
+
+def _compose_test(dir: str, file: str, failed_tests: str) -> dict:
+    '''
+    Creates a dictionary with the features of a test and test information.
+    The features are read from the file and the test information is extracted from the file name.
+
+    :param dir: The directory where the file is located
+    :param file: The file name
+    :param failed_tests: A list of failed tests
+    :returns: A dictionary with test information and features
+    '''
+    with open(os.path.join(dir, file), 'r') as f:
+        original, suite_name, test_name, variant_name = _parse_file_name(file)
+        features = {}
+        features['suite'] = suite_name
+        features['task-name'] = test_name
+        features['variant'] = variant_name
+        features['success'] = original not in failed_tests
+        features.update(json.loads(f.read()))
+        return features
+
+
+def _compose_env_variables(env_variables: str) -> list[dict]:
+    '''
+    Given environment variables in the form of a comma-separated list of key=value,
+    it creates a list of dictionaries of [{"name": <name>, "value": <value>}...]
+
+    :param env_variables: a comma-separated list of key=value environment variables
+    :returns: A list of dictionaries
+    '''
+    composed = []
+    for env in env_variables.split(',') if env_variables else []:
+        name, value = env.split('=')
+        composed.append({"name": name, "value": value})
+    return composed
+
+
+def compose_system(dir: str, system: str, failed_tests: str = "", env_variables: str = None, scenarios: str = None) -> dict:
+    '''
+    Given a containing directory, a system-identifying string, and other information
+    about failed tests, environment variables, and scenarios, it creates a dictionary
+    containing the feature information found in the files contained in the directory
+    for that system.
+
+    :param dir: Directory that contains feature-tagging files
+    :param system: Identifying string to select only files with that string
+    :param failed_tests: String containing the names of failing tests
+    :param env_variables: Comma-separated string of key=value environment variables
+    :param scenarios: Comma-separated string of scenario names
+    :returns: Dictionary containing all tests and tests information for the system
+    '''
+    files = [file for file in os.listdir(
+        dir) if system in file and file.count(':') >= 2]
+    system_dict = {
+        'schema-version': '0.0.0',
+        'system': files[0].split(':')[1] if len(files) > 0 else "",
+        'scenarios': scenarios.split(',') if scenarios else [],
+        'env-variables': _compose_env_variables(env_variables),
+        'tests': [_compose_test(dir, file, failed_tests) for file in files],
+    }
+    return system_dict
+
+
+def get_system_list(dir: str) -> set:
+    '''
+    Constructs a list of all systems from the filenames in the specified directory
+
+    :param dir: Directory containing feature-tagging information for tests
+    :returns: Set of identifying strings for systems
+    '''
+    files = [f for f in os.listdir(
+        dir) if os.path.isfile(os.path.join(dir, f))]
+    systems = [":".join(file.split(':')[:2])
+               for file in files if file.count(':') >= 2]
+    return set(systems)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description="""
+    Given a directory containing files with outputs of journal-analzyer.py with filenames
+    of format <backend>:<system>:suite\\path\\<test>:<variant>, it will construct a json
+    file for each <backend>:<system> with feature-tagging information, accompanied with
+    additional test information.
+ """) + parser.add_argument('-d', '--dir', type=str, + help='Path to the feature-tags folder') + parser.add_argument('-o', '--output', type=str, help='Output directory') + parser.add_argument('-s', '--scenarios', type=str, + help='Comma-separated list of scenarios', default="") + parser.add_argument('-e', '--env-variables', type=str, + help='Comma-separated list of environment variables as key=value', default="") + parser.add_argument('-f', '--failed-tests', type=str, + help='List of failed tests', default="") + args = parser.parse_args() + os.makedirs(args.output, exist_ok=True) + systems = get_system_list(args.dir) + for system in systems: + composed = compose_system(dir=args.dir, system=system, + failed_tests=args.failed_tests, env_variables=args.env_variables) + system = "_".join(system.split(':')) + with open(os.path.join(args.output, system + '.json'), 'w') as f: + f.write(json.dumps(composed)) From ab142d3c89c22df303bda0abc96b546b2ae820ac Mon Sep 17 00:00:00 2001 From: katie Date: Tue, 25 Feb 2025 10:09:29 +0100 Subject: [PATCH 2/9] tests: add workflow rerun in case of failure --- .github/workflows/feature-tagging.yaml | 15 ++++++++++++++- .github/workflows/rerun.yaml | 18 ++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/rerun.yaml diff --git a/.github/workflows/feature-tagging.yaml b/.github/workflows/feature-tagging.yaml index 78acf559613..7a5fe0506de 100644 --- a/.github/workflows/feature-tagging.yaml +++ b/.github/workflows/feature-tagging.yaml @@ -77,4 +77,17 @@ jobs: spread-tag-features: ${{ inputs.features }} strategy: fail-fast: false - matrix: ${{ fromJson(needs.read-systems.outputs.nested-systems) }} \ No newline at end of file + matrix: ${{ fromJson(needs.read-systems.outputs.nested-systems) }} + + re-run: + permissions: + actions: write + needs: [tag-features-fundamental, tag-features-non-fundamental, tag-features-nested] + # If the spread tests ended in failure, rerun the workflow up to 2 times + if: failure() && fromJSON(github.run_attempt) < 3 + runs-on: ubuntu-latest + steps: + - env: + GH_REPO: ${{ github.repository }} + GH_TOKEN: ${{ github.token }} + run: gh workflow run rerun.yaml -F run_id=${{ github.run_id }} diff --git a/.github/workflows/rerun.yaml b/.github/workflows/rerun.yaml new file mode 100644 index 00000000000..c0b7845e52c --- /dev/null +++ b/.github/workflows/rerun.yaml @@ -0,0 +1,18 @@ +on: + workflow_dispatch: + inputs: + run_id: + required: true +jobs: + rerun: + permissions: + actions: write + runs-on: ubuntu-latest + steps: + - name: rerun ${{ inputs.run_id }} + env: + GH_REPO: ${{ github.repository }} + GH_TOKEN: ${{ github.token }} + run: | + gh run watch ${{ inputs.run_id }} > /dev/null 2>&1 + gh run rerun ${{ inputs.run_id }} --failed \ No newline at end of file From 42e77c56ae43668a43f608c1c1fa3eb1cc99acae Mon Sep 17 00:00:00 2001 From: katie Date: Tue, 25 Feb 2025 13:01:02 +0100 Subject: [PATCH 3/9] tests: add feature-tag consolidation if spread task has been rerun --- .github/workflows/feature-tagging.yaml | 71 +++++++++++++++- .github/workflows/spread-tests.yaml | 3 +- run-spread | 42 +++++++++- tests/lib/compose-features.py | 107 ++++++++++++++++++++++++- 4 files changed, 215 insertions(+), 8 deletions(-) diff --git a/.github/workflows/feature-tagging.yaml b/.github/workflows/feature-tagging.yaml index 7a5fe0506de..006aee60052 100644 --- a/.github/workflows/feature-tagging.yaml +++ b/.github/workflows/feature-tagging.yaml @@ -7,6 +7,10 @@ on: type: string description: 
'Comma-separated list of features to tag' default: 'all' + maximum-reruns: + type: number + description: 'Maximum number of times to rerun failed spread tasks upon failure' + default: 3 jobs: read-systems: @@ -83,11 +87,74 @@ jobs: permissions: actions: write needs: [tag-features-fundamental, tag-features-non-fundamental, tag-features-nested] - # If the spread tests ended in failure, rerun the workflow up to 2 times - if: failure() && fromJSON(github.run_attempt) < 3 + # If the spread tests ended in failure, rerun the workflow up to maximum-reruns-1 times + if: failure() && fromJSON(github.run_attempt) < fromJSON(inputs.maximum-reruns) runs-on: ubuntu-latest steps: - env: GH_REPO: ${{ github.repository }} GH_TOKEN: ${{ github.token }} run: gh workflow run rerun.yaml -F run_id=${{ github.run_id }} + + create-reports: + needs: [tag-features-fundamental, tag-features-non-fundamental, tag-features-nested] + runs-on: ubuntu-latest + if: success() || fromJSON(github.run_attempt) >= fromJSON(inputs.maximum-reruns) + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get generated data + uses: actions/github-script@v6 + with: + script: | + let page = 1; + let per_page = 100; + let allArtifacts = []; + let response; + do { + response = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: context.runId, + per_page: per_page, + page: page + }); + allArtifacts = allArtifacts.concat(response.data.artifacts); + page++; + } while (response.data.artifacts.length === per_page); + + let matchingArtifacts = allArtifacts.filter((artifact) => { + return artifact.name.startsWith(`feature-tags`); + }); + + for (let artifact of matchingArtifacts) { + let download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: artifact.id, + archive_format: 'zip', + }); + let fs = require('fs'); + fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/${artifact.name}.zip`, Buffer.from(download.data)); + console.log(`Downloaded artifact: ${artifact.name}.zip`); + } + - name: Unzip artifacts + run: | + mkdir -p feature-tags-artifacts + find . 
-name "feature-tags*.zip" | while read filename; do + unzip "$filename" -d "feature-tags-artifacts" + done + + - name: Consolidate feature data + run: | + ./tests/lib/compose-features.py \ + --dir "feature-tags-artifacts" \ + --output "final-feature-tags" \ + --replace-old-runs + + - name: Upload feature data + uses: actions/upload-artifact@v4 + with: + name: "feature-tags" + path: "final-feature-tags" \ No newline at end of file diff --git a/.github/workflows/spread-tests.yaml b/.github/workflows/spread-tests.yaml index 059eac118fc..b7cb6e2d96d 100644 --- a/.github/workflows/spread-tests.yaml +++ b/.github/workflows/spread-tests.yaml @@ -396,13 +396,14 @@ jobs: --dir "${ARTIFACTS_FOLDER}/feature-tags" \ --output "feature-tags" \ --failed-tests "$(cat $FAILED_TESTS_FILE)" \ + --run-attempt ${{ github.run_attempt }} \ --env-variables "SPREAD_EXPERIMENTAL_FEATURES=${SPREAD_EXPERIMENTAL_FEATURES},SPREAD_SNAPD_DEB_FROM_REPO=${SPREAD_SNAPD_DEB_FROM_REPO}" - name: Upload feature tags if: always() && env.SPREAD_TAG_FEATURES != '' uses: actions/upload-artifact@v4 with: - name: "feature-tags-${{ inputs.group }}-${{ inputs.systems }}" + name: "feature-tags-${{ inputs.group }}-${{ inputs.systems }}_${{ github.run_attempt }}" path: "feature-tags" - name: Save spread test results to cache diff --git a/run-spread b/run-spread index c7cc7cd94a5..f30d06122b4 100755 --- a/run-spread +++ b/run-spread @@ -28,5 +28,45 @@ if [ "$need_rebuild" = 1 ]; then echo "-- $(date) -- snapd snap rebuild complete" fi +if [ -z "$SPREAD_TAG_FEATURES" ]; then + SPREAD_USE_PREBUILT_SNAPD_SNAP=true exec spread "$@" +else + WRITE_DIR="/tmp/features" + RUN_TESTS=("$@") + NUM_ATTEMPTS=${NUM_ATTEMPTS:-1} + export SPREAD_USE_PREBUILT_SNAPD_SNAP=true + mkdir -p "$WRITE_DIR" + for i in $(seq 1 "$NUM_ATTEMPTS"); do + + spread -artifacts "${WRITE_DIR}"/features-artifacts -no-debug-output "${RUN_TESTS[@]}" | tee "${WRITE_DIR}/spread-logs.txt" + + if [ -f "$WRITE_DIR"/spread-logs.txt ]; then + ./tests/lib/external/snapd-testing-tools/utils/log-parser "${WRITE_DIR}"/spread-logs.txt --output "${WRITE_DIR}"/spread-results.json + ./tests/lib/external/snapd-testing-tools/utils/log-analyzer list-reexecute-tasks "${RUN_TESTS[@]}" "${WRITE_DIR}"/spread-results.json > "${WRITE_DIR}"/failed-tests.txt + else + touch "${WRITE_DIR}/failed-tests.txt" + fi + + ./tests/lib/compose-features.py \ + --dir ${WRITE_DIR}/features-artifacts/feature-tags \ + --output ${WRITE_DIR}/composed-feature-tags \ + --failed-tests "$(cat ${WRITE_DIR}/failed-tests.txt)" \ + --run-attempt "${i}" + + if [ -z "$(cat ${WRITE_DIR}/failed-tests.txt)" ]; then + break + fi + + mapfile RUN_TESTS < "${WRITE_DIR}"/failed-tests.txt + done + + ./tests/lib/compose-features.py \ + --dir ${WRITE_DIR}/composed-feature-tags \ + --output ${WRITE_DIR}/final-feature-tags \ + --replace-old-runs + + + echo "Your feature tags can be found in $WRITE_DIR/final-feature-tags" +fi + # Run spread -SPREAD_USE_PREBUILT_SNAPD_SNAP=true exec spread "$@" diff --git a/tests/lib/compose-features.py b/tests/lib/compose-features.py index 3635bf2220a..b961694a630 100755 --- a/tests/lib/compose-features.py +++ b/tests/lib/compose-features.py @@ -3,6 +3,7 @@ import argparse import json import os +import shutil def _parse_file_name(file_name: str) -> tuple[str, str, str, str]: @@ -105,28 +106,126 @@ def get_system_list(dir: str) -> set: return set(systems) +def _replace_tests(old_json_file, new_json_file): + ''' + The new_json_file contains a subset of the tests found in the old_json_file. 
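+    For example, the old file might record tasks A and B with A marked failed,
+    while the new file records only A from a rerun where it passed.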
+    This function leaves not-rerun tests untouched, while replacing old test
+    runs with their rerun counterparts found in new_json_file. The resulting
+    json in output therefore contains a mix of tests that were not rerun and
+    the latest version of tests that were rerun.
+
+    :param old_json_file: file path of first run of composed features
+    :param new_json_file: file path of rerun of composed features
+    :returns: dictionary that contains the first run data with rerun tests
+              replaced by the rerun data from the new_json_file
+    '''
+    with open(old_json_file, 'r') as f:
+        old_json = json.load(f)
+    with open(new_json_file, 'r') as f:
+        new_json = json.load(f)
+    for test in new_json['tests']:
+        for old_test in old_json['tests']:
+            if old_test['task-name'] == test['task-name'] and old_test['suite'] == test['suite'] and old_test['variant'] == test['variant']:
+                old_test.clear()
+                for key, value in test.items():
+                    old_test[key] = value
+                break
+    return old_json
+
+
+def replace_old_runs(dir, output_dir):
+    '''
+    Given the directory in input (dir) that contains a set of files of original
+    run data together with rerun data, this populates the specified output_dir
+    with a consolidated set of composed features, one per system. An original
+    composed features file is a file that ends in _1.json. A rerun composed
+    features file is a file that ends in _<attempt>.json where <attempt> is greater
+    than 1. The numbering is automatically generated when the compose features
+    script was called with the --run-attempt flag.
+
+    :param dir: directory containing composed feature files with varying run
+                attempt numbers
+    :param output_dir: directory where to write the consolidated composed features
+    '''
+    os.makedirs(output_dir)
+    filenames_no_ext = [os.path.splitext(f)[0] for f in os.listdir(
+        dir) if os.path.isfile(os.path.join(dir, f))]
+    reruns_no_ext = [
+        file for file in filenames_no_ext if not file.endswith('_1')]
+    originals_no_ext = [file for file in filenames_no_ext if file.endswith(
+        '_1') and any(rerun for rerun in reruns_no_ext if rerun.startswith(file[:-2]))]
+    reruns_no_ext.sort(key=lambda x: int(x.split('_')[-1]))
+    for rerun in reruns_no_ext:
+        beginning = '_'.join(rerun.split('_')[:-1])
+        original = list(
+            filter(lambda x: x.startswith(beginning), originals_no_ext))
+        if len(original) != 1:
+            raise RuntimeError(
+                "The rerun %s does not have a corresponding original run" % rerun)
+        tests = _replace_tests(os.path.join(
+            dir, original[0] + ".json"), os.path.join(dir, rerun + ".json"))
+        with open(os.path.join(output_dir, beginning + ".json"), 'w') as f:
+            f.write(json.dumps(tests))
+    for file in filenames_no_ext:
+        if file not in originals_no_ext:
+            shutil.copyfile(os.path.join(dir, file + ".json"),
+                            os.path.join(output_dir, '_'.join(file.split('_')[:-1]) + '.json'))
+
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description="""
+    Can be run in two modes: composed feature generation or composed feature consolidation
+
+    Composed feature generation mode
+
     Given a directory containing files with outputs of journal-analzyer.py with filenames
     of format <backend>:<system>:suite\\path\\<test>:<variant>, it will construct a json
     file for each <backend>:<system> with feature-tagging information, accompanied with
     additional test information.
+
+    Composed feature consolidation mode
+
+    Given a directory containing files of pre-composed feature information with filenames like
+    <system>_<run-attempt>.json, it writes the consolidated feature information in a new
+    directory (specified with the --output flag) where the latest rerun data replaces the old.
+ So if a file contains one test that was later rerun, the new consolidated file will contain + unaltered content from the original run except for the one test rerun that will replace + the old. """) - parser.add_argument('-d', '--dir', type=str, - help='Path to the feature-tags folder') - parser.add_argument('-o', '--output', type=str, help='Output directory') + parser.add_argument('-d', '--dir', type=str, required=True, + help='Path to the folder containing json files') + parser.add_argument('-o', '--output', type=str, + help='Output directory', required=True) parser.add_argument('-s', '--scenarios', type=str, help='Comma-separated list of scenarios', default="") parser.add_argument('-e', '--env-variables', type=str, help='Comma-separated list of environment variables as key=value', default="") parser.add_argument('-f', '--failed-tests', type=str, help='List of failed tests', default="") + parser.add_argument('--run-attempt', type=int, help="""Run attempt number of the json files contained in the folder [1,). + Only needed when rerunning spread for failed tests. When specified, will append the run attempt + number on the filename, which will then be used when running this script with the --replace-old-runs + flag to determine replacement order""") + parser.add_argument('-r', '--replace-old-runs', action="store_true", + help='When set, will process pre-composed runs and consolidate them into the output dir') args = parser.parse_args() + + if args.replace_old_runs: + replace_old_runs(args.dir, args.output) + exit(0) + + attempt = "" + if args.run_attempt: + if args.run_attempt == 0: + raise RuntimeError( + "The first run attempt must be 1. 0 is not allowed") + attempt = "_%s" % args.run_attempt os.makedirs(args.output, exist_ok=True) systems = get_system_list(args.dir) for system in systems: composed = compose_system(dir=args.dir, system=system, failed_tests=args.failed_tests, env_variables=args.env_variables) system = "_".join(system.split(':')) - with open(os.path.join(args.output, system + '.json'), 'w') as f: + with open(os.path.join(args.output, system + attempt + '.json'), 'w') as f: f.write(json.dumps(composed)) From 8ff33958ffe340bc349365289167bd0989fe92cd Mon Sep 17 00:00:00 2001 From: katie Date: Fri, 28 Feb 2025 12:11:24 +0100 Subject: [PATCH 4/9] tests: added feature tagging on master pushes of only changed tests --- .github/workflows/feature-tagging.yaml | 46 +++++++++++++++------ tests/lib/spread/rules/feature-tagging.yaml | 18 ++++++++ 2 files changed, 51 insertions(+), 13 deletions(-) create mode 100644 tests/lib/spread/rules/feature-tagging.yaml diff --git a/.github/workflows/feature-tagging.yaml b/.github/workflows/feature-tagging.yaml index 006aee60052..127846aa5d4 100644 --- a/.github/workflows/feature-tagging.yaml +++ b/.github/workflows/feature-tagging.yaml @@ -1,6 +1,9 @@ name: Feature Tagging on: + push: + branches: ["master"] + workflow_dispatch: inputs: features: @@ -11,8 +14,25 @@ on: type: number description: 'Maximum number of times to rerun failed spread tasks upon failure' default: 3 + run-all: + type: boolean + description: 'If true, will run all spread tests. 
If false, will only run tagging on changed spread tests in the last commit' + default: false jobs: + set-inputs: + runs-on: ubuntu-latest + outputs: + features: ${{ steps.step1.outputs.features }} + maximum-reruns: ${{ steps.step1.outputs.maximum-reruns }} + run-all: ${{ steps.step1.outputs.run-all }} + steps: + - name: Set inputs + run: | + echo "features=${{ inputs.features || 'all' }}" >> $GITHUB_OUTPUT + echo "maximum-reruns=${{ inputs.maximum-reruns || 3 }}" >> $GITHUB_OUTPUT + echo "run-all=${{ inputs.run-all || false }}" >> $GITHUB_OUTPUT + read-systems: runs-on: ubuntu-latest outputs: @@ -33,7 +53,7 @@ jobs: tag-features-fundamental: uses: ./.github/workflows/spread-tests.yaml - needs: [read-systems] + needs: [set-inputs, read-systems] name: "spread ${{ matrix.group }}" with: runs-on: '["self-hosted", "spread-enabled"]' @@ -41,17 +61,17 @@ jobs: backend: ${{ matrix.backend }} systems: ${{ matrix.systems }} tasks: ${{ matrix.tasks }} - rules: ${{ matrix.rules }} + rules: ${{ needs.set-inputs.outputs.run-all && matrix.rules || feature-tagging.yaml }} is-fundamental: true use-snapd-snap-from-master: true - spread-tag-features: ${{ inputs.features }} + spread-tag-features: ${{ needs.set-inputs.outputs.features }} strategy: fail-fast: false matrix: ${{ fromJson(needs.read-systems.outputs.fundamental-systems) }} tag-features-non-fundamental: uses: ./.github/workflows/spread-tests.yaml - needs: [read-systems] + needs: [set-inputs, read-systems] name: "spread ${{ matrix.group }}" with: runs-on: '["self-hosted", "spread-enabled"]' @@ -59,16 +79,16 @@ jobs: backend: ${{ matrix.backend }} systems: ${{ matrix.systems }} tasks: ${{ matrix.tasks }} - rules: ${{ matrix.rules }} + rules: ${{ needs.set-inputs.outputs.run-all && matrix.rules || feature-tagging.yaml }} use-snapd-snap-from-master: true - spread-tag-features: ${{ inputs.features }} + spread-tag-features: ${{ needs.set-inputs.outputs.features }} strategy: fail-fast: false matrix: ${{ fromJson(needs.read-systems.outputs.non-fundamental-systems) }} tag-features-nested: uses: ./.github/workflows/spread-tests.yaml - needs: [read-systems] + needs: [set-inputs, read-systems] name: "spread ${{ matrix.group }}" with: runs-on: '["self-hosted", "spread-enabled"]' @@ -76,9 +96,9 @@ jobs: backend: ${{ matrix.backend }} systems: ${{ matrix.systems }} tasks: ${{ matrix.tasks }} - rules: ${{ matrix.rules }} + rules: ${{ needs.set-inputs.outputs.run-all && matrix.rules || feature-tagging.yaml }} use-snapd-snap-from-master: true - spread-tag-features: ${{ inputs.features }} + spread-tag-features: ${{ needs.set-inputs.outputs.features }} strategy: fail-fast: false matrix: ${{ fromJson(needs.read-systems.outputs.nested-systems) }} @@ -86,9 +106,9 @@ jobs: re-run: permissions: actions: write - needs: [tag-features-fundamental, tag-features-non-fundamental, tag-features-nested] + needs: [set-inputs, tag-features-fundamental, tag-features-non-fundamental, tag-features-nested] # If the spread tests ended in failure, rerun the workflow up to maximum-reruns-1 times - if: failure() && fromJSON(github.run_attempt) < fromJSON(inputs.maximum-reruns) + if: failure() && fromJSON(github.run_attempt) < fromJSON(needs.set-inputs.outputs.maximum-reruns) runs-on: ubuntu-latest steps: - env: @@ -97,9 +117,9 @@ jobs: run: gh workflow run rerun.yaml -F run_id=${{ github.run_id }} create-reports: - needs: [tag-features-fundamental, tag-features-non-fundamental, tag-features-nested] + needs: [set-inputs, tag-features-fundamental, tag-features-non-fundamental, 
tag-features-nested]
     runs-on: ubuntu-latest
-    if: success() || fromJSON(github.run_attempt) >= fromJSON(inputs.maximum-reruns)
+    if: success() || fromJSON(github.run_attempt) >= fromJSON(needs.set-inputs.outputs.maximum-reruns)
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
diff --git a/tests/lib/spread/rules/feature-tagging.yaml b/tests/lib/spread/rules/feature-tagging.yaml
new file mode 100644
index 00000000000..6e9b2d50c06
--- /dev/null
+++ b/tests/lib/spread/rules/feature-tagging.yaml
@@ -0,0 +1,18 @@
+rules:
+  tests:
+    from:
+      - tests/main/.*/task.yaml
+      - tests/core/.*/task.yaml
+      - tests/completion/.*/task.yaml
+      - tests/cross/.*/task.yaml
+      - tests/regression/.*/task.yaml
+      - tests/smoke/.*/task.yaml
+      - tests/unit/.*/task.yaml
+      - tests/upgrade/.*/task.yaml
+      - tests/fips/.*/task.yaml
+      - tests/nested/.*/task.yaml
+    to: [$SELF]
+
+  rest:
+    from: [.*]
+    to: [$NONE]

From f6ddef5b32695a0fbf1d3affd998d6384e971427 Mon Sep 17 00:00:00 2001
From: katie
Date: Fri, 14 Mar 2025 13:02:09 +0100
Subject: [PATCH 5/9] tests: addressed review comments and changed tests name
 format from using '\' to using '--' to reflect changes in debug tagging PR

---
 .github/workflows/feature-tagging.yaml |  2 +-
 .github/workflows/rerun.yaml           |  2 +-
 run-spread                             |  4 +---
 tests/lib/compose-features.py          | 18 +++++++++++-------
 4 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/.github/workflows/feature-tagging.yaml b/.github/workflows/feature-tagging.yaml
index 127846aa5d4..d6e9b1605d9 100644
--- a/.github/workflows/feature-tagging.yaml
+++ b/.github/workflows/feature-tagging.yaml
@@ -177,4 +177,4 @@ jobs:
         uses: actions/upload-artifact@v4
         with:
           name: "feature-tags"
-          path: "final-feature-tags"
\ No newline at end of file
+          path: "final-feature-tags"
diff --git a/.github/workflows/rerun.yaml b/.github/workflows/rerun.yaml
index c0b7845e52c..206f9cc6038 100644
--- a/.github/workflows/rerun.yaml
+++ b/.github/workflows/rerun.yaml
@@ -15,4 +15,4 @@ jobs:
         GH_TOKEN: ${{ github.token }}
       run: |
         gh run watch ${{ inputs.run_id }} > /dev/null 2>&1
-        gh run rerun ${{ inputs.run_id }} --failed
\ No newline at end of file
+        gh run rerun ${{ inputs.run_id }} --failed
diff --git a/run-spread b/run-spread
index f30d06122b4..a167c19ea0e 100755
--- a/run-spread
+++ b/run-spread
@@ -53,7 +53,7 @@ else
             --failed-tests "$(cat ${WRITE_DIR}/failed-tests.txt)" \
             --run-attempt "${i}"
 
-        if [ -z "$(cat ${WRITE_DIR}/failed-tests.txt)" ]; then
+        if [ ! -s "${WRITE_DIR}/failed-tests.txt" ]; then
             break
         fi
 
@@ -68,5 +68,3 @@ else
 
     echo "Your feature tags can be found in $WRITE_DIR/final-feature-tags"
 fi
-
-# Run spread
diff --git a/tests/lib/compose-features.py b/tests/lib/compose-features.py
index b961694a630..d71910c63a1 100755
--- a/tests/lib/compose-features.py
+++ b/tests/lib/compose-features.py
@@ -8,7 +8,7 @@
 
 def _parse_file_name(file_name: str) -> tuple[str, str, str, str]:
     '''
-    Given a file name in the format with backslashes <backend>:<system>:suite\\path\\test:variant,
+    Given a file name in the format with double dashes <backend>:<system>:suite--path--test:variant,
     it returns the original name, the suite name, the test name and the variant name.
     So in the example, it returns:
     - original_name = <backend>:<system>:suite/path/test:variant
@@ -19,7 +19,7 @@ def _parse_file_name(file_name: str) -> tuple[str, str, str, str]:
     :param file_name: The file name to parse
     :returns: A tuple with the original name, the suite name, the test name and the variant name. If variant is not present, it returns None.
     '''
-    original_name = file_name.replace('\\', '/')
+    original_name = file_name.replace('--', '/')
     task = ":".join(original_name.split(':')[2:])
     suite_name = "/".join(task.split('/')[:-1])
     test_name = task.split('/')[-1]
@@ -173,6 +173,13 @@ def replace_old_runs(dir, output_dir):
                             os.path.join(output_dir, '_'.join(file.split('_')[:-1]) + '.json'))
 
 
+def run_attempt_type(value):
+    if not str(value).isdigit() or int(value) <= 0:
+        raise argparse.ArgumentTypeError(
+            "%s is invalid. Run attempts are integers and start at 1" % value)
+    return value
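+
+# Sanity examples for the validator above (argparse passes raw strings):
+#   run_attempt_type("2")  -> "2"
+#   run_attempt_type("0")  -> raises argparse.ArgumentTypeError
+#   run_attempt_type("x")  -> raises argparse.ArgumentTypeError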
+
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description="""
     Can be run in two modes: composed feature generation or composed feature consolidation
@@ -192,7 +199,7 @@ def replace_old_runs(dir, output_dir):
     So if a file contains one test that was later rerun, the new consolidated file will contain
     unaltered content from the original run except for the one test rerun that will replace
     the old.
-    """)
+    """, formatter_class=argparse.RawDescriptionHelpFormatter)
     parser.add_argument('-d', '--dir', type=str, required=True,
                         help='Path to the folder containing json files')
     parser.add_argument('-o', '--output', type=str,
                         help='Output directory', required=True)
@@ -203,7 +210,7 @@ def replace_old_runs(dir, output_dir):
     parser.add_argument('-f', '--failed-tests', type=str,
                         help='List of failed tests', default="")
-    parser.add_argument('--run-attempt', type=int, help="""Run attempt number of the json files contained in the folder [1,).
+    parser.add_argument('--run-attempt', type=run_attempt_type, help="""Run attempt number of the json files contained in the folder [1,).
     Only needed when rerunning spread for failed tests. When specified, will append the run attempt
     number on the filename, which will then be used when running this script with the --replace-old-runs
     flag to determine replacement order""")
     parser.add_argument('-r', '--replace-old-runs', action="store_true",
                         help='When set, will process pre-composed runs and consolidate them into the output dir')
     args = parser.parse_args()
@@ -217,9 +224,6 @@ def replace_old_runs(dir, output_dir):
     attempt = ""
     if args.run_attempt:
-        if args.run_attempt == 0:
-            raise RuntimeError(
-                "The first run attempt must be 1. 0 is not allowed")
         attempt = "_%s" % args.run_attempt
     os.makedirs(args.output, exist_ok=True)

From 7c83216d0b402ecec6a50f51859249bac395b791 Mon Sep 17 00:00:00 2001
From: katie
Date: Fri, 14 Mar 2025 14:08:36 +0100
Subject: [PATCH 6/9] github: added option to force the use of the rules file

---
 .github/workflows/feature-tagging.yaml |  9 ++++++---
 .github/workflows/spread-tests.yaml    | 11 ++++++++++-
 tests/lib/compose-features.py          |  2 +-
 3 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/feature-tagging.yaml b/.github/workflows/feature-tagging.yaml
index d6e9b1605d9..4eddf8fc841 100644
--- a/.github/workflows/feature-tagging.yaml
+++ b/.github/workflows/feature-tagging.yaml
@@ -61,10 +61,11 @@ jobs:
       backend: ${{ matrix.backend }}
       systems: ${{ matrix.systems }}
       tasks: ${{ matrix.tasks }}
-      rules: ${{ needs.set-inputs.outputs.run-all && matrix.rules || feature-tagging.yaml }}
+      rules: ${{ fromJSON(needs.set-inputs.outputs.run-all) && matrix.rules || 'feature-tagging.yaml' }}
       is-fundamental: true
       use-snapd-snap-from-master: true
       spread-tag-features: ${{ needs.set-inputs.outputs.features }}
+      force-use-ruleset: ${{ ! fromJSON(needs.set-inputs.outputs.run-all) }}
     strategy:
       fail-fast: false
       matrix: ${{ fromJson(needs.read-systems.outputs.fundamental-systems) }}
@@ -79,9 +80,10 @@ jobs:
       backend: ${{ matrix.backend }}
       systems: ${{ matrix.systems }}
       tasks: ${{ matrix.tasks }}
-      rules: ${{ needs.set-inputs.outputs.run-all && matrix.rules || feature-tagging.yaml }}
+      rules: ${{ fromJSON(needs.set-inputs.outputs.run-all) && matrix.rules || 'feature-tagging.yaml' }}
       use-snapd-snap-from-master: true
       spread-tag-features: ${{ needs.set-inputs.outputs.features }}
+      force-use-ruleset: ${{ ! fromJSON(needs.set-inputs.outputs.run-all) }}
     strategy:
       fail-fast: false
       matrix: ${{ fromJson(needs.read-systems.outputs.non-fundamental-systems) }}
@@ -96,9 +98,10 @@ jobs:
       backend: ${{ matrix.backend }}
       systems: ${{ matrix.systems }}
       tasks: ${{ matrix.tasks }}
-      rules: ${{ needs.set-inputs.outputs.run-all && matrix.rules || feature-tagging.yaml }}
+      rules: ${{ fromJSON(needs.set-inputs.outputs.run-all) && matrix.rules || 'feature-tagging.yaml' }}
      use-snapd-snap-from-master: true
       spread-tag-features: ${{ needs.set-inputs.outputs.features }}
+      force-use-ruleset: ${{ ! fromJSON(needs.set-inputs.outputs.run-all) }}
     strategy:
       fail-fast: false
       matrix: ${{ fromJson(needs.read-systems.outputs.nested-systems) }}
diff --git a/.github/workflows/spread-tests.yaml b/.github/workflows/spread-tests.yaml
index b7cb6e2d96d..92a5937359c 100644
--- a/.github/workflows/spread-tests.yaml
+++ b/.github/workflows/spread-tests.yaml
@@ -5,6 +5,11 @@ on:
       description: 'A json list of tags to indicate which runner to use'
       required: true
       type: string
+    force-use-ruleset:
+      description: 'If true, will force the use of the ruleset specified in the rules input'
+      required: false
+      default: false
+      type: boolean
     group:
       description: 'The name of the group of backends, systems, tests, and rules'
       required: true
@@ -151,7 +156,11 @@ jobs:
           # The tests are just filtered when the change is a PR
           # When 'Run Nested' label is added in a PR, all the nested tests have to be executed
           TASKS_TO_RUN=""
-          if [ -z "${{ github.event.number }}" ] || [ "$RUN_NESTED" = 'true' ] || [ -z "${{ inputs.rules }}" ] || [ -z "$changes_param" ]; then
+          if [ "${{ inputs.force-use-ruleset }}" == "false" ] && \
+             ( [ -z "${{ github.event.number }}" ] || \
+               [ "$RUN_NESTED" = 'true' ] || \
+               [ -z "${{ inputs.rules }}" ] || \
+               [ -z "$changes_param" ] ); then
               for TASKS in ${{ inputs.tasks }}; do
                   TASKS_TO_RUN="$TASKS_TO_RUN $prefix:$TASKS"
               done
diff --git a/tests/lib/compose-features.py b/tests/lib/compose-features.py
index d71910c63a1..89668d58742 100755
--- a/tests/lib/compose-features.py
+++ b/tests/lib/compose-features.py
@@ -187,7 +187,7 @@ def run_attempt_type(value):
     Composed feature generation mode
 
     Given a directory containing files with outputs of journal-analzyer.py with filenames
-    of format <backend>:<system>:suite\\path\\<test>:<variant>, it will construct a json
+    of format <backend>:<system>:suite--path--<test>:<variant>, it will construct a json
     file for each <backend>:<system> with feature-tagging information, accompanied with
     additional test information.
From c46602c0a414019641e6524e872a4ef42b7e6be3 Mon Sep 17 00:00:00 2001 From: katie Date: Wed, 19 Mar 2025 17:20:56 +0100 Subject: [PATCH 7/9] tests: create structures for features --- run-spread | 91 ++++---- ...ompose-features.py => compose_features.py} | 206 +++++++++++------- tests/lib/feature_dict.py | 65 ++++++ tests/lib/spread/rules/feature-tagging.yaml | 2 + tests/lib/test_compose_features.py | 104 +++++++++ 5 files changed, 338 insertions(+), 130 deletions(-) mode change 100755 => 100644 run-spread rename tests/lib/{compose-features.py => compose_features.py} (50%) create mode 100644 tests/lib/feature_dict.py create mode 100644 tests/lib/test_compose_features.py diff --git a/run-spread b/run-spread old mode 100755 new mode 100644 index a167c19ea0e..6fdd245e71b --- a/run-spread +++ b/run-spread @@ -6,65 +6,64 @@ need_rebuild=1 shopt -s nullglob if [ "${NO_REBUILD:-0}" = "1" ]; then - echo "-- $(date) -- requested no snap rebuild" - need_rebuild=0 + echo "-- $(date) -- requested no snap rebuild" + need_rebuild=0 - # check if we have any snaps built at all - built_snaps=(built-snap/snapd_*.snap.keep) - if (( "${#built_snaps[@]}" > 0 )); then - echo "-- $(date) -- found prebuilt snapd snaps:" - for s in "${built_snaps[@]}"; do - echo "-- $s" - done - else - echo "-- $(date) -- no prebuilt snaps found" - need_rebuild=1 - fi + # check if we have any snaps built at all + built_snaps=(built-snap/snapd_*.snap.keep) + if (("${#built_snaps[@]}" > 0)); then + echo "-- $(date) -- found prebuilt snapd snaps:" + for s in "${built_snaps[@]}"; do + echo "-- $s" + done + else + echo "-- $(date) -- no prebuilt snaps found" + need_rebuild=1 + fi fi if [ "$need_rebuild" = 1 ]; then - echo "-- $(date) -- rebuilding snapd snap" - ./tests/build-test-snapd-snap - echo "-- $(date) -- snapd snap rebuild complete" + echo "-- $(date) -- rebuilding snapd snap" + ./tests/build-test-snapd-snap + echo "-- $(date) -- snapd snap rebuild complete" fi if [ -z "$SPREAD_TAG_FEATURES" ]; then - SPREAD_USE_PREBUILT_SNAPD_SNAP=true exec spread "$@" + SPREAD_USE_PREBUILT_SNAPD_SNAP=true exec spread "$@" else - WRITE_DIR="/tmp/features" - RUN_TESTS=("$@") - NUM_ATTEMPTS=${NUM_ATTEMPTS:-1} - export SPREAD_USE_PREBUILT_SNAPD_SNAP=true - mkdir -p "$WRITE_DIR" - for i in $(seq 1 "$NUM_ATTEMPTS"); do + WRITE_DIR="/tmp/features" + RUN_TESTS=("$@") + NUM_ATTEMPTS=${NUM_ATTEMPTS:-1} + export SPREAD_USE_PREBUILT_SNAPD_SNAP=true + mkdir -p "$WRITE_DIR" + for i in $(seq 1 "$NUM_ATTEMPTS"); do - spread -artifacts "${WRITE_DIR}"/features-artifacts -no-debug-output "${RUN_TESTS[@]}" | tee "${WRITE_DIR}/spread-logs.txt" + spread -artifacts "${WRITE_DIR}"/features-artifacts -no-debug-output "${RUN_TESTS[@]}" | tee "${WRITE_DIR}/spread-logs.txt" - if [ -f "$WRITE_DIR"/spread-logs.txt ]; then - ./tests/lib/external/snapd-testing-tools/utils/log-parser "${WRITE_DIR}"/spread-logs.txt --output "${WRITE_DIR}"/spread-results.json - ./tests/lib/external/snapd-testing-tools/utils/log-analyzer list-reexecute-tasks "${RUN_TESTS[@]}" "${WRITE_DIR}"/spread-results.json > "${WRITE_DIR}"/failed-tests.txt - else - touch "${WRITE_DIR}/failed-tests.txt" - fi + if [ -f "$WRITE_DIR"/spread-logs.txt ]; then + ./tests/lib/external/snapd-testing-tools/utils/log-parser "${WRITE_DIR}"/spread-logs.txt --output "${WRITE_DIR}"/spread-results.json + ./tests/lib/external/snapd-testing-tools/utils/log-analyzer list-reexecute-tasks "${RUN_TESTS[@]}" "${WRITE_DIR}"/spread-results.json >"${WRITE_DIR}"/failed-tests.txt + else + touch "${WRITE_DIR}/failed-tests.txt" + fi - 
./tests/lib/compose-features.py \
-        --dir ${WRITE_DIR}/features-artifacts/feature-tags \
-        --output ${WRITE_DIR}/composed-feature-tags \
-        --failed-tests "$(cat ${WRITE_DIR}/failed-tests.txt)" \
-        --run-attempt "${i}"
+        ./tests/lib/compose-features.py \
+            --dir ${WRITE_DIR}/features-artifacts/feature-tags \
+            --output ${WRITE_DIR}/composed-feature-tags \
+            --failed-tests "$(cat ${WRITE_DIR}/failed-tests.txt)" \
+            --run-attempt "${i}"
 
-    if [ ! -s "${WRITE_DIR}/failed-tests.txt" ]; then
-        break
-    fi
+        if [ ! -s "${WRITE_DIR}/failed-tests.txt" ]; then
+            break
+        fi
 
-    mapfile RUN_TESTS < "${WRITE_DIR}"/failed-tests.txt
-done
+        mapfile RUN_TESTS <"${WRITE_DIR}"/failed-tests.txt
+    done
 
-./tests/lib/compose-features.py \
-    --dir ${WRITE_DIR}/composed-feature-tags \
-    --output ${WRITE_DIR}/final-feature-tags \
-    --replace-old-runs
+    ./tests/lib/compose-features.py \
+        --dir ${WRITE_DIR}/composed-feature-tags \
+        --output ${WRITE_DIR}/final-feature-tags \
+        --replace-old-runs
 
-
-    echo "Your feature tags can be found in $WRITE_DIR/final-feature-tags"
+    echo "Your feature tags can be found in $WRITE_DIR/final-feature-tags"
 fi
diff --git a/tests/lib/compose-features.py b/tests/lib/compose_features.py
similarity index 50%
rename from tests/lib/compose-features.py
rename to tests/lib/compose_features.py
index 89668d58742..06c0db39d1c 100755
--- a/tests/lib/compose-features.py
+++ b/tests/lib/compose_features.py
@@ -4,33 +4,36 @@
 import argparse
 import json
 import os
 import shutil
+from typing import Any
+import feature_dict
 
 
 def _parse_file_name(file_name: str) -> tuple[str, str, str, str]:
     '''
-    Given a file name in the format with double dashes <backend>:<system>:suite--path--test:variant,
-    it returns the original name, the suite name, the test name and the variant name.
-    So in the example, it returns:
-    - original_name = <backend>:<system>:suite/path/test:variant
+    Given a file name in the format with double dashes <backend>:<system>:suite--path--task:variant
+    and optionally an extension, it returns the original name, the suite name, the task name,
+    and the variant name. So in the example, it returns:
+    - original_name = <backend>:<system>:suite/path/task:variant
     - suite_name = suite/path
-    - test_name = test
+    - task_name = task
     - variant_name = variant
 
     :param file_name: The file name to parse
-    :returns: A tuple with the original name, the suite name, the test name and the variant name. If variant is not present, it returns None.
+    :returns: A tuple with the original name, the suite name, the task name and the variant name. If variant is not present, it returns the empty string.
     '''
+    file_name = os.path.splitext(file_name)[0]
     original_name = file_name.replace('--', '/')
-    task = ":".join(original_name.split(':')[2:])
-    suite_name = "/".join(task.split('/')[:-1])
-    test_name = task.split('/')[-1]
-    variant_name = None
-    if test_name.count(':') == 1:
-        variant_name = test_name.split(':')[1]
-        test_name = test_name.split(':')[0]
-    return original_name, suite_name, test_name, variant_name
+    task = ':'.join(original_name.split(':')[2:])
+    suite_name = '/'.join(task.split('/')[:-1])
+    task_name = task.split('/')[-1]
+    variant_name = ''
+    if task_name.count(':') == 1:
+        variant_name = task_name.split(':')[1]
+        task_name = task_name.split(':')[0]
+    return original_name, suite_name, task_name, variant_name
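+
+# Illustrative parse of the new '--' format (hypothetical file name; a
+# trailing extension, if present, is stripped first):
+#
+#   _parse_file_name('google:ubuntu-22.04-64:tests--main--snap-install:modern.json')
+#   -> ('google:ubuntu-22.04-64:tests/main/snap-install:modern',
+#       'tests/main', 'snap-install', 'modern')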
 
 
-def _compose_test(dir: str, file: str, failed_tests: str) -> dict:
+def _compose_test(dir: str, file: str, failed_tests: str) -> feature_dict.TaskFeatures:
     '''
     Creates a dictionary with the features of a test and test information.
     The features are read from the file and the test information is extracted from the file name.
 
     :param dir: The directory where the file is located
     :param file: The file name
     :param failed_tests: A list of failed tests
     :returns: A dictionary with test information and features
     '''
     with open(os.path.join(dir, file), 'r') as f:
-        original, suite_name, test_name, variant_name = _parse_file_name(file)
-        features = {}
-        features['suite'] = suite_name
-        features['task-name'] = test_name
-        features['variant'] = variant_name
-        features['success'] = original not in failed_tests
+        original, suite_name, result_name, variant_name = _parse_file_name(
+            file)
+        features = feature_dict.TaskFeatures(
+            suite=suite_name,
+            task_name=result_name,
+            variant=variant_name,
+            success=original not in failed_tests
+        )
         features.update(json.loads(f.read()))
         return features
 
 
-def _compose_env_variables(env_variables: str) -> list[dict]:
+def _compose_env_variables(env_variables: str) -> list[feature_dict.EnvVariables]:
     '''
     Given environment variables in the form of a comma-separated list of key=value,
     it creates a list of dictionaries of [{"name": <name>, "value": <value>}...]
 
     :param env_variables: a comma-separated list of key=value environment variables
     :returns: A list of dictionaries
     '''
     composed = []
     for env in env_variables.split(',') if env_variables else []:
         name, value = env.split('=')
-        composed.append({"name": name, "value": value})
+        composed.append(feature_dict.EnvVariables(name=name.strip(), value=value.strip()))
     return composed
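+
+# With the strip() calls above, whitespace around entries is tolerated
+# (illustrative input):
+#
+#   _compose_env_variables('FOO=1, BAR=2')
+#   -> [{'name': 'FOO', 'value': '1'}, {'name': 'BAR', 'value': '2'}]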
 
 
-def compose_system(dir: str, system: str, failed_tests: str = "", env_variables: str = None, scenarios: str = None) -> dict:
+def compose_system(dir: str, system: str, failed_tests: str = '', env_variables: str = '', scenarios: str = '') -> feature_dict.SystemFeatures:
     '''
     Given a containing directory, a system-identifying string, and other information
     about failed tests, environment variables, and scenarios, it creates a dictionary
     containing the feature information found in the files contained in the directory
     for that system.
 
     :param dir: Directory that contains feature-tagging files
     :param system: Identifying string to select only files with that string
     :param failed_tests: String containing the names of failing tests
     :param env_variables: Comma-separated string of key=value environment variables
     :param scenarios: Comma-separated string of scenario names
     :returns: Dictionary containing all tests and tests information for the system
     '''
     files = [file for file in os.listdir(
         dir) if system in file and file.count(':') >= 2]
-    system_dict = {
-        'schema-version': '0.0.0',
-        'system': files[0].split(':')[1] if len(files) > 0 else "",
-        'scenarios': scenarios.split(',') if scenarios else [],
-        'env-variables': _compose_env_variables(env_variables),
-        'tests': [_compose_test(dir, file, failed_tests) for file in files],
-    }
-    return system_dict
+    return feature_dict.SystemFeatures(
+        schema_version='0.0.0',
+        system=system,
+        scenarios=[scenario.strip()
+                   for scenario in scenarios.split(',')] if scenarios else [],
+        env_variables=_compose_env_variables(env_variables),
+        tests=[_compose_test(dir, file, failed_tests) for file in files]
+    )
 
 
-def get_system_list(dir: str) -> set:
+def get_system_list(dir: str) -> set[str]:
     '''
     Constructs a list of all systems from the filenames in the specified directory
 
     :param dir: Directory containing feature-tagging information for tests
     :returns: Set of identifying strings for systems
     '''
-    files = [f for f in os.listdir(
-        dir) if os.path.isfile(os.path.join(dir, f))]
-    systems = [":".join(file.split(':')[:2])
-               for file in files if file.count(':') >= 2]
-    return set(systems)
+    files = [f for f in os.listdir(dir)
+             if os.path.isfile(os.path.join(dir, f))]
+    return {':'.join(file.split(':')[:2])
+            for file in files if file.count(':') >= 2}
 
 
-def _replace_tests(old_json_file, new_json_file):
+def _replace_tests(old_json_file: str, new_json_file: str) -> feature_dict.SystemFeatures:
     '''
     The new_json_file contains a subset of the tests found in the old_json_file.
     This function leaves not-rerun tests untouched, while replacing old test
     runs with their rerun counterparts found in new_json_file. The resulting
     json in output therefore contains a mix of tests that were not rerun and
     the latest version of tests that were rerun.
 
     :param old_json_file: file path of first run of composed features
     :param new_json_file: file path of rerun of composed features
     :returns: dictionary that contains the first run data with rerun tests
               replaced by the rerun data from the new_json_file
     '''
     with open(old_json_file, 'r') as f:
         old_json = json.load(f)
     with open(new_json_file, 'r') as f:
         new_json = json.load(f)
     for test in new_json['tests']:
         for old_test in old_json['tests']:
-            if old_test['task-name'] == test['task-name'] and old_test['suite'] == test['suite'] and old_test['variant'] == test['variant']:
+            if old_test['task_name'] == test['task_name'] and old_test['suite'] == test['suite'] and old_test['variant'] == test['variant']:
                 old_test.clear()
                 for key, value in test.items():
                     old_test[key] = value
                 break
     return old_json
 
 
+def _get_original_and_rerun_list(filenames: list[str]) -> tuple[list[str], list[str]]:
+    '''
+    Given a list of filenames, gets two lists of rerun information:
+    the first list contains the first run (of systems that were rerun)
+    while the second list contains all reruns, sorted from earliest to latest.
+
+    Note: the list of first runs ONLY contains the first run of reruns;
+    it does not contain systems that had no rerun.
+
+    :param filenames: a list of filenames
+    :returns: the list of first runs and the list of all reruns
+    '''
+    def wo_ext(x): return os.path.splitext(x)[0]
+    reruns = [file for file in filenames if not wo_ext(file).endswith('_1')]
+    originals = [file for file in filenames
+                 if wo_ext(file).endswith('_1') and
+                 any(rerun for rerun in reruns if rerun.startswith(wo_ext(file)[:-2]))]
+    reruns.sort(key=lambda x: int(wo_ext(x).split('_')[-1]))
+    return originals, reruns
+
+
+def _get_name_without_run_number(test: str) -> str:
+    '''
+    Given a name like <name>_<number> (optionally with extension),
+    returns <name>. If the name doesn't end with _<number>, then
+    it will return the original name.
+    '''
+    test_split = os.path.splitext(test)[0].split('_')
+    if test_split[-1].isdigit():
+        return '_'.join(test_split[:-1])
+    return test
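+
+# Illustrative behavior of the two helpers above:
+#
+#   _get_original_and_rerun_list(['a_1.json', 'a_2.json', 'b_1.json'])
+#   -> (['a_1.json'], ['a_2.json'])   # b_1.json had no rerun, so it is in neither list
+#   _get_name_without_run_number('my:system_2.json')  -> 'my:system'
+#   _get_name_without_run_number('feature-tags')      -> 'feature-tags'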
+ ''' + test_split = os.path.splitext(test)[0].split('_') + if test_split[-1].isdigit(): + return '_'.join(test_split[:-1]) + return test + + +def replace_old_runs(dir: str, output_dir: str) -> None: ''' Given the directory in input (dir) that contains a set of files of original run data together with rerun data, this populates the specified output_dir @@ -149,72 +186,74 @@ def replace_old_runs(dir, output_dir): :param output_dir: directory where to write the consolidated composed features ''' os.makedirs(output_dir) - filenames_no_ext = [os.path.splitext(f)[0] for f in os.listdir( + filenames = [f for f in os.listdir( dir) if os.path.isfile(os.path.join(dir, f))] - reruns_no_ext = [ - file for file in filenames_no_ext if not file.endswith('_1')] - originals_no_ext = [file for file in filenames_no_ext if file.endswith( - '_1') and any(rerun for rerun in reruns_no_ext if rerun.startswith(file[:-2]))] - reruns_no_ext.sort(key=lambda x: int(x.split('_')[-1])) - for rerun in reruns_no_ext: - beginning = '_'.join(rerun.split('_')[:-1]) + originals, reruns = _get_original_and_rerun_list(filenames) + for rerun in reruns: + result_name = _get_name_without_run_number(rerun) original = list( - filter(lambda x: x.startswith(beginning), originals_no_ext)) + filter(lambda x: x.startswith(result_name), originals)) if len(original) != 1: raise RuntimeError( - "The rerun %s does not have a corresponding original run" % rerun) + f'The rerun {rerun} does not have a corresponding original run') tests = _replace_tests(os.path.join( - dir, original[0] + ".json"), os.path.join(dir, rerun + ".json")) - with open(os.path.join(output_dir, beginning + ".json"), 'w') as f: + dir, original[0]), os.path.join(dir, rerun)) + with open(os.path.join(output_dir, result_name + '.json'), 'w') as f: f.write(json.dumps(tests)) - for file in filenames_no_ext: - if file not in originals_no_ext: - shutil.copyfile(os.path.join(dir, file + ".json"), - os.path.join(output_dir, '_'.join(file.split('_')[:-1]) + '.json')) + + # Search for system test results that had no reruns and + # simply copy their result file to the output folder + for file in filenames: + if file not in originals and file not in reruns: + shutil.copyfile(os.path.join(dir, file), + os.path.join(output_dir, _get_name_without_run_number(file) + '.json')) -def run_attempt_type(value): +def run_attempt_type(value: Any) -> Any: if value is not int or int(value) <= 0: raise argparse.ArgumentTypeError( - "%s is invalid. Run attempts are integers and start at 1" % value) + f'{value} is invalid. Run attempts are integers and start at 1') return value if __name__ == '__main__': - parser = argparse.ArgumentParser(description=""" - Can be run in two modes: composed feature generation or composed feature consolidation + description = ''' + Can be run in two modes: composed feature generation or composed feature consolidation - Composed feature generation mode + Composed feature generation mode - Given a directory containing files with outputs of journal-analzyer.py with filenames - of format ::suite--path--:, it will construct a json - file for each : with feature-tagging information, accompanied with - additional test information. + Given a directory containing files with outputs of journal-analzyer.py with filenames + of format ::suite--path--:, it will construct a json + file for each : with feature-tagging information, accompanied with + additional test information. 
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="""
+    description = '''
     Can be run in two modes: composed feature generation or composed feature consolidation
 
     Composed feature generation mode
 
     Given a directory containing files with outputs of journal-analzyer.py with filenames
     of format <backend>:<system>:suite--path--<test>:<variant>, it will construct a json
     file for each <backend>:<system> with feature-tagging information, accompanied with
     additional test information.
 
     Composed feature consolidation mode
 
-    Given a directory containing files of pre-composed feature information with filenames like
-    <system>_<run-attempt>.json, it writes the consolidated feature information in a new
-    directory (specified with the --output flag) where the latest rerun data replaces the old.
+    Given a directory containing files of pre-composed feature information with filenames like
+    <backend>:<system>_<run-attempt>.json, it writes the consolidated feature information in a
+    new directory (specified with the --output flag) where the latest rerun data replaces the old.
     So if a file contains one test that was later rerun, the new consolidated file will contain
     unaltered content from the original run except for the one test rerun that will replace
     the old.
-    """, formatter_class=argparse.RawDescriptionHelpFormatter)
+    '''
+    parser = argparse.ArgumentParser(
+        description=description, formatter_class=argparse.RawDescriptionHelpFormatter)
     parser.add_argument('-d', '--dir', type=str, required=True,
                         help='Path to the folder containing json files')
     parser.add_argument('-o', '--output', type=str,
                         help='Output directory', required=True)
     parser.add_argument('-s', '--scenarios', type=str,
-                        help='Comma-separated list of scenarios', default="")
+                        help='Comma-separated list of scenarios', default='')
     parser.add_argument('-e', '--env-variables', type=str,
-                        help='Comma-separated list of environment variables as key=value', default="")
+                        help='Comma-separated list of environment variables as key=value', default='')
     parser.add_argument('-f', '--failed-tests', type=str,
-                        help='List of failed tests', default="")
-    parser.add_argument('--run-attempt', type=run_attempt_type, help="""Run attempt number of the json files contained in the folder [1,).
+                        help='List of failed tests', default='')
+    parser.add_argument('--run-attempt', type=run_attempt_type, help='''
+    Run attempt number of the json files contained in the folder [1,).
-    parser.add_argument('--run-attempt', type=run_attempt_type, help="""Run attempt number of the json files contained in the folder [1,).
+    parser.add_argument('--run-attempt', type=run_attempt_type, help='''
+    Run attempt number of the json files contained in the folder [1,).
     Only needed when rerunning spread for failed tests.
     When specified, will append the run attempt number on the filename,
     which will then be used when running this script with the --replace-old-runs
-    flag to determine replacement order""")
-    parser.add_argument('-r', '--replace-old-runs', action="store_true",
+    flag to determine replacement order''')
+    parser.add_argument('-r', '--replace-old-runs', action='store_true',
                         help='When set, will process pre-composed runs and consolidate them into the output dir')
 
     args = parser.parse_args()
@@ -222,14 +261,13 @@
         replace_old_runs(args.dir, args.output)
         exit(0)
 
-    attempt = ""
+    attempt = ''
     if args.run_attempt:
-        attempt = "_%s" % args.run_attempt
+        attempt = '_%s' % args.run_attempt
 
     os.makedirs(args.output, exist_ok=True)
     systems = get_system_list(args.dir)
     for system in systems:
         composed = compose_system(dir=args.dir, system=system,
                                   failed_tests=args.failed_tests, env_variables=args.env_variables)
-        system = "_".join(system.split(':'))
         with open(os.path.join(args.output, system + attempt + '.json'), 'w') as f:
-            f.write(json.dumps(composed))
+            json.dump(composed, f)
diff --git a/tests/lib/feature_dict.py b/tests/lib/feature_dict.py
new file mode 100644
index 00000000000..0612ea42992
--- /dev/null
+++ b/tests/lib/feature_dict.py
@@ -0,0 +1,65 @@
+
+from enum import Enum
+from typing import TypedDict
+
+
+class Cmd(TypedDict):
+    cmd: str
+
+
+class Endpoint(TypedDict):
+    method: str
+    path: str
+    action: str
+
+
+class Interface(TypedDict):
+    name: str
+    plug_snap_type: str
+    slot_snap_type: str
+
+
+class Status(str, Enum):
+    done = "done"
+    undone = "undone"
+    error = "error"
+
+
+class Task(TypedDict):
+    kind: str
+    snap_type: str
+    last_status: Status
+
+
+class Change(TypedDict):
+    kind: str
+    snap_type: str
+
+
+class Ensure(TypedDict):
+    functions: list[str]
+
+
+class EnvVariables(TypedDict):
+    name: str
+    value: str
+
+
+class TaskFeatures(TypedDict):
+    suite: str
+    task_name: str
+    variant: str
+    success: bool
+    cmds: list[Cmd]
+    endpoints: list[Endpoint]
+    interfaces: list[Interface]
+    tasks: list[Task]
+    changes: list[Change]
+    ensures: list[Ensure]
+
+
+class SystemFeatures(TypedDict):
+    schema_version: str
+    system: str
+    scenarios: list[str]
+    env_variables: list[EnvVariables]
+    tests: list[TaskFeatures]
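Since TypedDicts are ordinary dicts at runtime, the classes above only guide type checkers; a small sketch of that behavior, assuming tests/lib is on sys.path so feature_dict is importable:

from feature_dict import Status, Task

task = Task(kind='install-snap', snap_type='app', last_status=Status.done)
assert isinstance(task, dict)
# Status is a str-Enum, so members compare equal to their string values:
assert task['last_status'] == 'done'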
diff --git a/tests/lib/spread/rules/feature-tagging.yaml b/tests/lib/spread/rules/feature-tagging.yaml
index 6e9b2d50c06..c0f226fd393 100644
--- a/tests/lib/spread/rules/feature-tagging.yaml
+++ b/tests/lib/spread/rules/feature-tagging.yaml
@@ -1,3 +1,5 @@
+# This file is used by spread-filter to determine which spread tests are run.
+# Only spread tests whose task.yaml has changed will be run.
 rules:
   tests:
     from:
diff --git a/tests/lib/test_compose_features.py b/tests/lib/test_compose_features.py
new file mode 100644
index 00000000000..2170155b69c
--- /dev/null
+++ b/tests/lib/test_compose_features.py
@@ -0,0 +1,104 @@
+
+import os
+import sys
+# To ensure the unit test can be run from any point in the filesystem,
+# add parent folder to path to permit relative imports
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+import compose_features
+from feature_dict import *
+import json
+import tempfile
+import unittest
+
+
+class TestCompose(unittest.TestCase):
+    @staticmethod
+    def get_features(msg):
+        features = {}
+        features['cmds'] = [Cmd(cmd=msg), Cmd(cmd=f'{msg} {msg}')]
+        features['endpoints'] = [Endpoint(method='POST', path=msg)]
+        features['interfaces'] = [Interface(name=msg)]
+        features['tasks'] = [
+            Task(kind=msg, snap_type='snap',
+                 last_status=Status.done)]
+        features['changes'] = [Change(kind=msg, snap_type='snap')]
+        return features
+
+    @staticmethod
+    def get_json(suite, task, variant, success, msg):
+        features = TestCompose.get_features(msg)
+        return TaskFeatures(
+            suite=suite,
+            task_name=task,
+            variant=variant,
+            success=success,
+            cmds=features['cmds'],
+            endpoints=features['endpoints'],
+            interfaces=features['interfaces'],
+            tasks=features['tasks'],
+            changes=features['changes']
+        )
+
+    @staticmethod
+    def write_task(filepath, msg):
+        with open(filepath, 'w') as f:
+            json.dump(TestCompose.get_features(msg), f)
+
+    def test_compose(self):
+        with tempfile.TemporaryDirectory() as tmpdir:
+            task1variant1 = os.path.join(
+                tmpdir, 'backend:system:path--to--task1:variant1.json')
+            task2 = os.path.join(tmpdir, 'backend:system:path--to--task2')
+            TestCompose.write_task(task1variant1, 'task1variant1')
+            TestCompose.write_task(task2, 'task2')
+            systems = compose_features.get_system_list(tmpdir)
+            self.assertEqual(1, len(systems))
+            composed = compose_features.compose_system(tmpdir, systems.pop(),
+                                                       'backend:system:path/to/task1:variant1 backend:system:another/task2',
+                                                       'e=1, f=2', '1, 2, 3')
+            expected = SystemFeatures(schema_version='0.0.0',
+                                      system='backend:system',
+                                      scenarios=['1', '2', '3'],
+                                      env_variables=[{'name': 'e', 'value': '1'},
+                                                     {'name': 'f', 'value': '2'}],
+                                      tests=[TestCompose.get_json('path/to', 'task1', 'variant1', False, 'task1variant1'),
+                                             TestCompose.get_json('path/to', 'task2', '', True, 'task2')])
+            self.assertDictEqual(expected, composed)
+
+
+class TestReplace(unittest.TestCase):
+
+    def test_replace(self):
+        with tempfile.TemporaryDirectory() as tmpdir:
+            original = os.path.join(tmpdir, 'my:system_1')
+            rerun = os.path.join(tmpdir, 'my:system_2.json')
+            run_once = os.path.join(tmpdir, 'my:other-system_1.json')
+            original_json = {'system': 'my:system', 'tests': [{'task_name': 'task1', 'suite': 'my/suite', 'variant': '', 'success': False, 'cmds': [{'cmd': 'original run'}]},
+                                                              {'task_name': 'task2', 'suite': 'my/suite', 'variant': '', 'success': True, 'cmds': [{'cmd': 'original run'}]}]}
+            rerun_json = {'system': 'my:system', 'tests': [
+                {'task_name': 'task1', 'suite': 'my/suite', 'variant': '', 'success': True, 'cmds': [{'cmd': 'rerun 1'}, {'cmd': 'another'}]}]}
+            run_once_json = {'system': 'my:other-system', 'tests': [
+                {'task_name': 'task', 'suite': 'my/suite', 'variant': 'v1', 'success': True}]}
+            with open(original, 'w') as f:
+                json.dump(original_json, f)
+            with open(rerun, 'w') as f:
+                json.dump(rerun_json, f)
+            with open(run_once, 'w') as f:
+                json.dump(run_once_json, f)
+            output_dir = 'replaced'
+            compose_features.replace_old_runs(
+                tmpdir, os.path.join(tmpdir, output_dir))
+            self.assertEqual(
+                2, len(os.listdir(os.path.join(tmpdir, output_dir))))
+            with open(os.path.join(tmpdir, output_dir, 'my:system.json'), 'r') as f:
+                actual = json.load(f)
+            expected = {'system': 'my:system', 'tests': [{'task_name': 'task1', 'suite': 'my/suite', 'variant': '', 'success': True, 'cmds': [{'cmd': 'rerun 1'}, {'cmd': 'another'}]},
+                                                         {'task_name': 'task2', 'suite': 'my/suite', 'variant': '', 'success': True, 'cmds': [{'cmd': 'original run'}]}]}
+            self.assertDictEqual(expected, actual)
+            with open(os.path.join(tmpdir, output_dir, 'my:other-system.json'), 'r') as f:
+                actual = json.load(f)
+            self.assertDictEqual(run_once_json, actual)
+
+
+if __name__ == '__main__':
+    unittest.main()
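One way to run the new unit tests from a checkout root; a usage sketch, not part of the patch, and it assumes Python 3.9+ for the list[...] annotations used above:

import unittest

suite = unittest.defaultTestLoader.discover('tests/lib', pattern='test_compose_features.py')
unittest.TextTestRunner(verbosity=2).run(suite)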
From d91875e933e78507127d62909dbe5ccadc30ecbf Mon Sep 17 00:00:00 2001
From: katie
Date: Thu, 27 Mar 2025 11:49:12 +0100
Subject: [PATCH 8/9] tests: address review comments

---
 .../{compose_features.py => featcomposer.py}  | 63 ++++++++++---------
 tests/lib/{feature_dict.py => features.py}    |  4 ++
 ...mpose_features.py => test_featcomposer.py} | 12 ++--
 3 files changed, 45 insertions(+), 34 deletions(-)
 rename tests/lib/{compose_features.py => featcomposer.py} (85%)
 rename tests/lib/{feature_dict.py => features.py} (94%)
 rename tests/lib/{test_compose_features.py => test_featcomposer.py} (93%)

diff --git a/tests/lib/compose_features.py b/tests/lib/featcomposer.py
similarity index 85%
rename from tests/lib/compose_features.py
rename to tests/lib/featcomposer.py
index 06c0db39d1c..1ad27d55e82 100755
--- a/tests/lib/compose_features.py
+++ b/tests/lib/featcomposer.py
@@ -1,14 +1,18 @@
 #!/usr/bin/env python3
 
 import argparse
+from collections import namedtuple
 import json
 import os
 import shutil
 from typing import Any
 
-import feature_dict
+import features
 
 
-def _parse_file_name(file_name: str) -> tuple[str, str, str, str]:
+SpreadTaskNames = namedtuple('SpreadTaskNames', ['original', 'suite', 'task', 'variant'])
+
+
+def _parse_file_name(file_name: str) -> SpreadTaskNames:
     '''
     Given a file name in the format with double slashes <backend>:<system>:suite--path--task:variant
     and optionally an extension, it returns the original name, the suite name, the task name,
@@ -19,7 +23,7 @@
     - variant_name = variant
 
     :param file_name: The file name to parse
-    :returns: A tuple with the original name, the suite name, the task name and the variant name. If variant is not present, it returns None.
+    :returns: A namedtuple with the original name, the suite name, the task name and the variant name. If the variant is not present, it is None.
     '''
     file_name = os.path.splitext(file_name)[0]
     original_name = file_name.replace('--', '/')
@@ -30,33 +34,33 @@
     if task_name.count(':') == 1:
         variant_name = task_name.split(':')[1]
         task_name = task_name.split(':')[0]
-    return original_name, suite_name, task_name, variant_name
+    return SpreadTaskNames(original_name, suite_name, task_name, variant_name)
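A compact, stand-alone mirror of the renamed parser for illustration; the artifact name below is made up, and like the code above the variant falls back to None when absent:

import os
from collections import namedtuple

SpreadTaskNames = namedtuple('SpreadTaskNames', ['original', 'suite', 'task', 'variant'])

def parse(file_name: str) -> SpreadTaskNames:
    # Compact mirror of _parse_file_name, for illustration only.
    name = os.path.splitext(file_name)[0].replace('--', '/')
    task = ':'.join(name.split(':')[2:])           # drop '<backend>:<system>:'
    suite, _, task_name = task.rpartition('/')
    task_name, _, variant = task_name.partition(':')
    return SpreadTaskNames(name, suite, task_name, variant or None)

print(parse('google:ubuntu-24.04-64:tests--main--snap-install:parallel.json'))
# SpreadTaskNames(original='google:ubuntu-24.04-64:tests/main/snap-install:parallel',
#                 suite='tests/main', task='snap-install', variant='parallel')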
 
 
-def _compose_test(dir: str, file: str, failed_tests: str) -> feature_dict.TaskFeatures:
+def _compose_test(dir: str, file: str, failed_tests: str) -> features.TaskFeatures:
     '''
     Creates a dictionary with the features of a test and test information.
     The features are read from the file and the test information is extracted from the file name.
 
     :param dir: The directory where the file is located
     :param file: The file name
-    :param failed_tests: A list of failed tests
+    :param failed_tests: String containing the names of failing tests in any format
     :returns: A dictionary with test information and features
     '''
-    with open(os.path.join(dir, file), 'r') as f:
+    with open(os.path.join(dir, file), 'r', encoding='utf-8') as f:
         original, suite_name, result_name, variant_name = _parse_file_name(
             file)
-        features = feature_dict.TaskFeatures(
+        task_features = features.TaskFeatures(
             suite=suite_name,
             task_name=result_name,
             variant=variant_name,
             success=original not in failed_tests
         )
-        features.update(json.loads(f.read()))
-        return features
+        task_features.update(json.loads(f.read()))
+        return task_features
 
 
-def _compose_env_variables(env_variables: str) -> list[feature_dict.EnvVariables]:
+def _compose_env_variables(env_variables: list[str]) -> list[features.EnvVariables]:
     '''
     Given environment variables in the form of a comma-separated list of key=value,
     it creates a list of dictionaries of [{"name": <name>, "value": <value>}...]
@@ -65,13 +69,16 @@ def _compose_env_variables(env_variables: str) -> list[feature_dict.EnvVariables
     :returns: A list of dictionaries
     '''
     composed = []
-    for env in env_variables.split(',') if env_variables else []:
-        name, value = env.split('=')
-        composed.append(feature_dict.EnvVariables(name = name.strip(), value=value.strip()))
+    for env in env_variables:
+        name, sep, value = env.partition('=')
+        if sep != '=':
+            raise ValueError("Not a key=value pair {}".format(env))
+        composed.append(features.EnvVariables(
+            name=name.strip(), value=value.strip()))
     return composed
 
 
-def compose_system(dir: str, system: str, failed_tests: str = '', env_variables: str = '', scenarios: str = '') -> feature_dict.SystemFeatures:
+def compose_system(dir: str, system: str, failed_tests: str = '', env_variables: list[str] = [], scenarios: list[str] = []) -> features.SystemFeatures:
     '''
     Given a containing directory, a system-identifying string, and other information
     about failed tests, environment variables, and scenarios, it creates a dictionary
@@ -80,18 +87,18 @@ def compose_system(dir: str, system: str, failed_tests: str = '', env_variables:
     :param dir: Directory that contains feature-tagging files
     :param system: Identifying string to select only files with that string
-    :param failed_tests: String containing the names of failing tests
+    :param failed_tests: String containing the names of failing tests in any format
     :param env_variables: Comma-separated string of key=value environment variables
     :param scenarios: Comma-separated string of scenario names
     :returns: Dictionary containing all tests and tests information for the system
     '''
     files = [file for file in os.listdir(
         dir) if system in file and file.count(':') >= 2]
-    return feature_dict.SystemFeatures(
+    return features.SystemFeatures(
         schema_version='0.0.0',
         system=system,
         scenarios=[scenario.strip()
-                   for scenario in scenarios.split(',')] if scenarios else [],
+                   for scenario in scenarios] if scenarios else [],
         env_variables=_compose_env_variables(env_variables),
         tests=[_compose_test(dir, file, failed_tests) for file in files]
     )
@@ -110,7 +117,7 @@
         for file in files if file.count(':') >= 2}
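What the reworked _compose_env_variables produces for list input, shown as a stand-alone mirror (whitespace around '=' is stripped, matching the test data added later in this patch):

def compose_env(pairs: list[str]) -> list[dict]:
    # Compact mirror of _compose_env_variables above, for illustration only.
    out = []
    for env in pairs:
        name, sep, value = env.partition('=')
        if sep != '=':
            raise ValueError(f'Not a key=value pair {env}')
        out.append({'name': name.strip(), 'value': value.strip()})
    return out

assert compose_env(['e = 1 ', 'f = 2 ']) == [
    {'name': 'e', 'value': '1'}, {'name': 'f', 'value': '2'}]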
 
 
-def _replace_tests(old_json_file: str, new_json_file: str) -> feature_dict.SystemFeatures:
+def _replace_tests(old_json_file: str, new_json_file: str) -> features.SystemFeatures:
     '''
     The new_json_file contains a subset of the tests found in the old_json_file.
     This function leaves not-rerun tests untouched, while replacing old test
@@ -123,9 +130,9 @@ def _replace_tests(old_json_file: str, new_json_file: str) -> feature_dict.Syste
     :returns: dictionary that contains the first run data with rerun tests
               replaced by the rerun data from the new_json_file
     '''
-    with open(old_json_file, 'r') as f:
+    with open(old_json_file, 'r', encoding='utf-8') as f:
         old_json = json.load(f)
-    with open(new_json_file, 'r') as f:
+    with open(new_json_file, 'r', encoding='utf-8') as f:
         new_json = json.load(f)
     for test in new_json['tests']:
         for old_test in old_json['tests']:
@@ -198,7 +205,7 @@ def replace_old_runs(dir: str, output_dir: str) -> None:
                 f'The rerun {rerun} does not have a corresponding original run')
         tests = _replace_tests(os.path.join(
             dir, original[0]), os.path.join(dir, rerun))
-        with open(os.path.join(output_dir, result_name + '.json'), 'w') as f:
+        with open(os.path.join(output_dir, result_name + '.json'), 'w', encoding='utf-8') as f:
             f.write(json.dumps(tests))
 
     # Search for system test results that had no reruns and
@@ -210,7 +217,7 @@ def replace_old_runs(dir: str, output_dir: str) -> None:
 
 
 def run_attempt_type(value: Any) -> Any:
-    if value is not int or int(value) <= 0:
+    if not isinstance(value, int) or int(value) <= 0:
         raise argparse.ArgumentTypeError(
             f'{value} is invalid. Run attempts are integers and start at 1')
     return value
@@ -242,10 +249,10 @@ def run_attempt_type(value: Any) -> Any:
                         help='Path to the folder containing json files')
     parser.add_argument('-o', '--output', type=str,
                         help='Output directory', required=True)
-    parser.add_argument('-s', '--scenarios', type=str,
-                        help='Comma-separated list of scenarios', default='')
-    parser.add_argument('-e', '--env-variables', type=str,
-                        help='Comma-separated list of environment variables as key=value', default='')
+    parser.add_argument('-s', '--scenarios', type=str, nargs='*',
+                        help='List of scenarios', default='')
+    parser.add_argument('-e', '--env-variables', type=str, nargs='*',
+                        help='List of environment variables as key=value', default='')
     parser.add_argument('-f', '--failed-tests', type=str,
                         help='List of failed tests', default='')
     parser.add_argument('--run-attempt', type=run_attempt_type, help='''
@@ -269,5 +276,5 @@ def run_attempt_type(value: Any) -> Any:
     for system in systems:
         composed = compose_system(dir=args.dir, system=system,
                                   failed_tests=args.failed_tests, env_variables=args.env_variables)
-        with open(os.path.join(args.output, system + attempt + '.json'), 'w') as f:
+        with open(os.path.join(args.output, system + attempt + '.json'), 'w', encoding='utf-8') as f:
             json.dump(composed, f)
diff --git a/tests/lib/feature_dict.py b/tests/lib/features.py
similarity index 94%
rename from tests/lib/feature_dict.py
rename to tests/lib/features.py
index 0612ea42992..4c8afa5b58c 100644
--- a/tests/lib/feature_dict.py
+++ b/tests/lib/features.py
@@ -1,4 +1,8 @@
+'''
+Dictionaries to specify structure of feature logs
+'''
+
 
 from enum import Enum
 from typing import TypedDict
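Note that argparse passes type= callables the raw command-line string, so the isinstance check in the hunk above can never see an int; a converter along these lines (a sketch, not the committed code) would validate the value as intended:

import argparse

def run_attempt(value: str) -> int:
    # argparse hands this function the string from the command line.
    try:
        attempt = int(value)
    except ValueError:
        attempt = 0
    if attempt <= 0:
        raise argparse.ArgumentTypeError(
            f'{value} is invalid. Run attempts are integers and start at 1')
    return attempt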
diff --git a/tests/lib/test_compose_features.py b/tests/lib/test_featcomposer.py
similarity index 93%
rename from tests/lib/test_compose_features.py
rename to tests/lib/test_featcomposer.py
index 2170155b69c..467ee27e487 100644
--- a/tests/lib/test_compose_features.py
+++ b/tests/lib/test_featcomposer.py
@@ -5,8 +5,8 @@
 # add parent folder to path to permit relative imports
 sys.path.append(os.path.dirname(os.path.abspath(__file__)))
 
-import compose_features
-from feature_dict import *
+import tests.lib.featcomposer as featcomposer
+from features import *
 import json
 import tempfile
 import unittest
@@ -51,11 +51,11 @@ def test_compose(self):
             task2 = os.path.join(tmpdir, 'backend:system:path--to--task2')
             TestCompose.write_task(task1variant1, 'task1variant1')
             TestCompose.write_task(task2, 'task2')
-            systems = compose_features.get_system_list(tmpdir)
+            systems = featcomposer.get_system_list(tmpdir)
             self.assertEqual(1, len(systems))
-            composed = compose_features.compose_system(tmpdir, systems.pop(),
+            composed = featcomposer.compose_system(tmpdir, systems.pop(),
                                                        'backend:system:path/to/task1:variant1 backend:system:another/task2',
-                                                       'e=1, f=2', '1, 2, 3')
+                                                       ['e = 1 ', 'f = 2 '], ['1 ', ' 2', ' 3'])
             expected = SystemFeatures(schema_version='0.0.0',
                                       system='backend:system',
                                       scenarios=['1', '2', '3'],
@@ -86,7 +86,7 @@ def test_replace(self):
             with open(run_once, 'w') as f:
                 json.dump(run_once_json, f)
             output_dir = 'replaced'
-            compose_features.replace_old_runs(
+            featcomposer.replace_old_runs(
                 tmpdir, os.path.join(tmpdir, output_dir))
             self.assertEqual(
                 2, len(os.listdir(os.path.join(tmpdir, output_dir))))

From 2f4d0a7b2e1aa9fad0281d0a4ceed1660af4cf90 Mon Sep 17 00:00:00 2001
From: katie
Date: Thu, 27 Mar 2025 17:39:43 +0100
Subject: [PATCH 9/9] github: add feature extract to workflow

---
 .github/workflows/spread-tests.yaml  | 19 +++++++++++++++++--
 run-spread                           | 19 ++++++++++++++++++-
 tests/lib/tools/feature_extractor.py |  2 +-
 3 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/spread-tests.yaml b/.github/workflows/spread-tests.yaml
index 92a5937359c..c413ffd6cd2 100644
--- a/.github/workflows/spread-tests.yaml
+++ b/.github/workflows/spread-tests.yaml
@@ -401,12 +401,27 @@ jobs:
       - name: Analyze feature tags
         if: always() && env.SPREAD_TAG_FEATURES != ''
         run: |
+          featarg=""
+          for feature in ${SPREAD_TAG_FEATURES}; do
+            featarg+="-f $feature "
+          done
+          featdir="${ARTIFACTS_FOLDER}/extracted-tags"
+          mkdir -p "$featdir"
+          for dir in "${ARTIFACTS_FOLDER}/feature-tags"/*/; do
+            if [ -f "${dir}/journal.txt" ] && [ -f "${dir}/state.json" ]; then
+              ./tests/lib/tools/feature_extractor.py \
+                $featarg \
+                --journal "${dir}/journal.txt" \
+                --state "${dir}/state.json" \
+                --output "$featdir/$(basename ${dir})"
+            fi
+          done
           ./tests/lib/compose-features.py \
-            --dir "${ARTIFACTS_FOLDER}/feature-tags" \
+            --dir "$featdir" \
             --output "feature-tags" \
             --failed-tests "$(cat $FAILED_TESTS_FILE)" \
             --run-attempt ${{ github.run_attempt }} \
-            --env-variables "SPREAD_EXPERIMENTAL_FEATURES=${SPREAD_EXPERIMENTAL_FEATURES},SPREAD_SNAPD_DEB_FROM_REPO=${SPREAD_SNAPD_DEB_FROM_REPO}"
+            --env-variables SPREAD_EXPERIMENTAL_FEATURES=${SPREAD_EXPERIMENTAL_FEATURES} SPREAD_SNAPD_DEB_FROM_REPO=${SPREAD_SNAPD_DEB_FROM_REPO}
 
       - name: Upload feature tags
         if: always() && env.SPREAD_TAG_FEATURES != ''
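A rough Python equivalent of the per-test extraction loop above, for readers who prefer it spelled out; the directory layout and flags mirror the workflow invocation, while the feature names passed with -f are invented placeholders:

import pathlib
import subprocess

artifacts = pathlib.Path('spread-artifacts')
outdir = artifacts / 'extracted-tags'
outdir.mkdir(parents=True, exist_ok=True)
for d in (artifacts / 'feature-tags').iterdir():
    journal, state = d / 'journal.txt', d / 'state.json'
    if journal.is_file() and state.is_file():
        subprocess.run(['./tests/lib/tools/feature_extractor.py',
                        '-f', 'tasks', '-f', 'changes',   # placeholder feature names
                        '--journal', str(journal),
                        '--state', str(state),
                        '--output', str(outdir / d.name)], check=True)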
diff --git a/run-spread b/run-spread
index 6fdd245e71b..f0b4d10bac7 100644
--- a/run-spread
+++ b/run-spread
@@ -47,8 +47,25 @@
     else
         touch "${WRITE_DIR}/failed-tests.txt"
     fi
 
+    featarg=""
+    for feature in ${SPREAD_TAG_FEATURES}; do
+        featarg+="-f $feature "
+    done
+    featdir="${WRITE_DIR}/extracted-tags"
+    mkdir -p "$featdir"
+    for dir in "${WRITE_DIR}/feature-tags"/*/; do
+        if [ -f "${dir}/journal.txt" ] && [ -f "${dir}/state.json" ]; then
+            #shellcheck disable=SC2086
+            ./tests/lib/tools/feature_extractor.py \
+                $featarg \
+                --journal "${dir}/journal.txt" \
+                --state "${dir}/state.json" \
+                --output "$featdir/$(basename "${dir}")"
+        fi
+    done
+
     ./tests/lib/compose-features.py \
-        --dir ${WRITE_DIR}/features-artifacts/feature-tags \
+        --dir ${featdir} \
         --output ${WRITE_DIR}/composed-feature-tags \
         --failed-tests "$(cat ${WRITE_DIR}/failed-tests.txt)" \
         --run-attempt "${i}"
diff --git a/tests/lib/tools/feature_extractor.py b/tests/lib/tools/feature_extractor.py
index eee4e799dcc..a41e696b6e0 100755
--- a/tests/lib/tools/feature_extractor.py
+++ b/tests/lib/tools/feature_extractor.py
@@ -64,7 +64,7 @@ def get_feature_dictionary(log_file: TextIO, feature_list: list[str], state_json
 
     try:
         state_json = json.load(args.state)
-        feature_dictionary = get_feature_dictionary(args.journal, args.features, state_json)
+        feature_dictionary = get_feature_dictionary(args.journal, args.feature, state_json)
         json.dump(feature_dictionary, open(args.output, "w"))
     except json.JSONDecodeError:
         raise RuntimeError("The state.json is not valid json")
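Why the final hunk is the right fix: argparse derives the attribute name from the first long option, so an option registered as '-f', '--feature' is stored on args.feature, and args.features was an AttributeError waiting to happen. A toy parser showing the behavior; the action='append' is an assumption about feature_extractor.py's parser, not taken from the patch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-f', '--feature', action='append', default=[])
args = parser.parse_args(['-f', 'tasks', '-f', 'changes'])
assert args.feature == ['tasks', 'changes']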