diff --git a/.github/workflows/feature-tagging.yaml b/.github/workflows/feature-tagging.yaml new file mode 100644 index 00000000000..4eddf8fc841 --- /dev/null +++ b/.github/workflows/feature-tagging.yaml @@ -0,0 +1,183 @@ +name: Feature Tagging + +on: + push: + branches: ["master"] + + workflow_dispatch: + inputs: + features: + type: string + description: 'Comma-separated list of features to tag' + default: 'all' + maximum-reruns: + type: number + description: 'Maximum number of times to rerun failed spread tasks upon failure' + default: 3 + run-all: + type: boolean + description: 'If true, will run all spread tests. If false, will only run tagging on changed spread tests in the last commit' + default: false + +jobs: + set-inputs: + runs-on: ubuntu-latest + outputs: + features: ${{ steps.step1.outputs.features }} + maximum-reruns: ${{ steps.step1.outputs.maximum-reruns }} + run-all: ${{ steps.step1.outputs.run-all }} + steps: + - name: Set inputs + run: | + echo "features=${{ inputs.features || 'all' }}" >> $GITHUB_OUTPUT + echo "maximum-reruns=${{ inputs.maximum-reruns || 3 }}" >> $GITHUB_OUTPUT + echo "run-all=${{ inputs.run-all || false }}" >> $GITHUB_OUTPUT + + read-systems: + runs-on: ubuntu-latest + outputs: + fundamental-systems: ${{ steps.read-systems.outputs.fundamental-systems }} + non-fundamental-systems: ${{ steps.read-systems.outputs.non-fundamental-systems }} + nested-systems: ${{ steps.read-systems.outputs.nested-systems }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Read matrix file + id: read-systems + shell: bash + run: | + echo "fundamental-systems=$(jq -c . ./.github/workflows/fundamental-systems.json)" >> $GITHUB_OUTPUT + echo "non-fundamental-systems=$(jq -c . ./.github/workflows/non-fundamental-systems.json)" >> $GITHUB_OUTPUT + echo "nested-systems=$(jq -c . 
./.github/workflows/nested-systems.json)" >> $GITHUB_OUTPUT + + tag-features-fundamental: + uses: ./.github/workflows/spread-tests.yaml + needs: [set-inputs, read-systems] + name: "spread ${{ matrix.group }}" + with: + runs-on: '["self-hosted", "spread-enabled"]' + group: ${{ matrix.group }} + backend: ${{ matrix.backend }} + systems: ${{ matrix.systems }} + tasks: ${{ matrix.tasks }} + rules: ${{ fromJSON(needs.set-inputs.outputs.run-all) && matrix.rules || 'feature-tagging.yaml' }} + is-fundamental: true + use-snapd-snap-from-master: true + spread-tag-features: ${{ needs.set-inputs.outputs.features }} + force-use-ruleset: ${{ ! fromJSON(needs.set-inputs.outputs.run-all) }} + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.read-systems.outputs.fundamental-systems) }} + + tag-features-non-fundamental: + uses: ./.github/workflows/spread-tests.yaml + needs: [set-inputs, read-systems] + name: "spread ${{ matrix.group }}" + with: + runs-on: '["self-hosted", "spread-enabled"]' + group: ${{ matrix.group }} + backend: ${{ matrix.backend }} + systems: ${{ matrix.systems }} + tasks: ${{ matrix.tasks }} + rules: ${{ fromJSON(needs.set-inputs.outputs.run-all) && matrix.rules || 'feature-tagging.yaml' }} + use-snapd-snap-from-master: true + spread-tag-features: ${{ needs.set-inputs.outputs.features }} + force-use-ruleset: ${{ ! 
fromJSON(needs.set-inputs.outputs.run-all) }} + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.read-systems.outputs.non-fundamental-systems) }} + + tag-features-nested: + uses: ./.github/workflows/spread-tests.yaml + needs: [set-inputs, read-systems] + name: "spread ${{ matrix.group }}" + with: + runs-on: '["self-hosted", "spread-enabled"]' + group: ${{ matrix.group }} + backend: ${{ matrix.backend }} + systems: ${{ matrix.systems }} + tasks: ${{ matrix.tasks }} + rules: ${{ fromJSON(needs.set-inputs.outputs.run-all) && matrix.rules || 'feature-tagging.yaml' }} + use-snapd-snap-from-master: true + spread-tag-features: ${{ needs.set-inputs.outputs.features }} + force-use-ruleset: ${{ ! fromJSON(needs.set-inputs.outputs.run-all) }} + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.read-systems.outputs.nested-systems) }} + + re-run: + permissions: + actions: write + needs: [set-inputs, tag-features-fundamental, tag-features-non-fundamental, tag-features-nested] + # If the spread tests ended in failure, rerun the workflow up to maximum-reruns-1 times + if: failure() && fromJSON(github.run_attempt) < fromJSON(needs.set-inputs.outputs.maximum-reruns) + runs-on: ubuntu-latest + steps: + - env: + GH_REPO: ${{ github.repository }} + GH_TOKEN: ${{ github.token }} + run: gh workflow run rerun.yaml -F run_id=${{ github.run_id }} + + create-reports: + needs: [set-inputs, tag-features-fundamental, tag-features-non-fundamental, tag-features-nested] + runs-on: ubuntu-latest + if: success() || fromJSON(github.run_attempt) >= fromJSON(needs.set-inputs.outputs.maximum-reruns) + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get generated data + uses: actions/github-script@v6 + with: + script: | + let page = 1; + let per_page = 100; + let allArtifacts = []; + let response; + do { + response = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: context.runId, + per_page: 
per_page, + page: page + }); + allArtifacts = allArtifacts.concat(response.data.artifacts); + page++; + } while (response.data.artifacts.length === per_page); + + let matchingArtifacts = allArtifacts.filter((artifact) => { + return artifact.name.startsWith(`feature-tags`); + }); + + for (let artifact of matchingArtifacts) { + let download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: artifact.id, + archive_format: 'zip', + }); + let fs = require('fs'); + fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/${artifact.name}.zip`, Buffer.from(download.data)); + console.log(`Downloaded artifact: ${artifact.name}.zip`); + } + - name: Unzip artifacts + run: | + mkdir -p feature-tags-artifacts + find . -name "feature-tags*.zip" | while read filename; do + unzip "$filename" -d "feature-tags-artifacts" + done + + - name: Consolidate feature data + run: | + ./tests/lib/compose-features.py \ + --dir "feature-tags-artifacts" \ + --output "final-feature-tags" \ + --replace-old-runs + + - name: Upload feature data + uses: actions/upload-artifact@v4 + with: + name: "feature-tags" + path: "final-feature-tags" diff --git a/.github/workflows/rerun.yaml b/.github/workflows/rerun.yaml new file mode 100644 index 00000000000..206f9cc6038 --- /dev/null +++ b/.github/workflows/rerun.yaml @@ -0,0 +1,18 @@ +on: + workflow_dispatch: + inputs: + run_id: + required: true +jobs: + rerun: + permissions: + actions: write + runs-on: ubuntu-latest + steps: + - name: rerun ${{ inputs.run_id }} + env: + GH_REPO: ${{ github.repository }} + GH_TOKEN: ${{ github.token }} + run: | + gh run watch ${{ inputs.run_id }} > /dev/null 2>&1 + gh run rerun ${{ inputs.run_id }} --failed diff --git a/.github/workflows/spread-tests.yaml b/.github/workflows/spread-tests.yaml index ac294396540..c413ffd6cd2 100644 --- a/.github/workflows/spread-tests.yaml +++ b/.github/workflows/spread-tests.yaml @@ -5,6 +5,11 @@ on: description: 'A json list of tags 
to indicate which runner to use' required: true type: string + force-use-ruleset: + description: 'If true, will force the use of the rulesset specified in the rules input' + required: false + default: false + type: boolean group: description: 'The name of the group of backends, systems, tests, and rules' required: true @@ -47,12 +52,17 @@ on: description: 'Comma-separated list of experimental snapd features to enable with: snap set system "experimental.=true"' required: false type: string + spread-tag-features: + description: 'If specified, will tag the spread results with the specified features (comma-separated)' + required: false + type: string jobs: run-spread: env: SPREAD_EXPERIMENTAL_FEATURES: ${{ inputs.spread-experimental-features }} + SPREAD_TAG_FEATURES: ${{ inputs.spread-tag-features }} runs-on: ${{ fromJSON(inputs.runs-on) }} steps: @@ -146,7 +156,11 @@ jobs: # The tests are just filtered when the change is a PR # When 'Run Nested' label is added in a PR, all the nested tests have to be executed TASKS_TO_RUN="" - if [ -z "${{ github.event.number }}" ] || [ "$RUN_NESTED" = 'true' ] || [ -z "${{ inputs.rules }}" ] || [ -z "$changes_param" ]; then + if [ "${{ inputs.force-use-ruleset }}" == "false" ] && \ + ( [ -z "${{ github.event.number }}" ] || \ + [ "$RUN_NESTED" = 'true' ] || \ + [ -z "${{ inputs.rules }}" ] || \ + [ -z "$changes_param" ] ); then for TASKS in ${{ inputs.tasks }}; do TASKS_TO_RUN="$TASKS_TO_RUN $prefix:$TASKS" done @@ -306,6 +320,12 @@ jobs: exit 0 fi + SPREAD_FLAGS='-no-debug-output -logs spread-logs' + if [ -n "$SPREAD_TAG_FEATURES" ]; then + SPREAD_FLAGS="$SPREAD_FLAGS -artifacts spread-artifacts" + echo "ARTIFACTS_FOLDER=spread-artifacts" >> $GITHUB_ENV + fi + # Run spread tests # "pipefail" ensures that a non-zero status from the spread is # propagated; and we use a subshell as this option could trigger @@ -313,7 +333,7 @@ jobs: echo "Running command: $SPREAD $RUN_TESTS" ( set -o pipefail - $SPREAD -no-debug-output -logs 
spread-logs $RUN_TESTS | \ + $SPREAD $SPREAD_FLAGS $RUN_TESTS | \ ./tests/lib/external/snapd-testing-tools/utils/log-filter $FILTER_PARAMS | \ tee spread.log ) @@ -378,6 +398,38 @@ jobs: echo "TEST_FAILED=true" >> $GITHUB_ENV fi + - name: Analyze feature tags + if: always() && env.SPREAD_TAG_FEATURES != '' + run: | + featarg="" + for feature in ${SPREAD_TAG_FEATURES}; do + featarg+="-f $feature " + done + featdir="${ARTIFACTS_FOLDER}/extracted-tags" + mkdir -p "$featdir" + for dir in "${ARTIFACTS_FOLDER}/feature-tags"/*/; do + if [ -f "${dir}/journal.txt" ] && [ -f "${dir}/state.json" ]; then + ./tests/lib/tools/feature_extractor.py \ + $featarg \ + --journal "${dir}/journal.txt" \ + --state "${dir}/state.json" \ + --output "$featdir/$(basename ${dir})" + fi + done + ./tests/lib/compose-features.py \ + --dir "$featdir" \ + --output "feature-tags" \ + --failed-tests "$(cat $FAILED_TESTS_FILE)" \ + --run-attempt ${{ github.run_attempt }} \ + --env-variables SPREAD_EXPERIMENTAL_FEATURES=${SPREAD_EXPERIMENTAL_FEATURES} SPREAD_SNAPD_DEB_FROM_REPO=${SPREAD_SNAPD_DEB_FROM_REPO} + + - name: Upload feature tags + if: always() && env.SPREAD_TAG_FEATURES != '' + uses: actions/upload-artifact@v4 + with: + name: "feature-tags-${{ inputs.group }}-${{ inputs.systems }}_${{ github.run_attempt }}" + path: "feature-tags" + - name: Save spread test results to cache if: always() uses: actions/cache/save@v4 diff --git a/run-spread b/run-spread old mode 100755 new mode 100644 index c7cc7cd94a5..f0b4d10bac7 --- a/run-spread +++ b/run-spread @@ -6,27 +6,81 @@ need_rebuild=1 shopt -s nullglob if [ "${NO_REBUILD:-0}" = "1" ]; then - echo "-- $(date) -- requested no snap rebuild" - need_rebuild=0 - - # check if we have any snaps built at all - built_snaps=(built-snap/snapd_*.snap.keep) - if (( "${#built_snaps[@]}" > 0 )); then - echo "-- $(date) -- found prebuilt snapd snaps:" - for s in "${built_snaps[@]}"; do - echo "-- $s" - done - else - echo "-- $(date) -- no prebuilt snaps found" - 
need_rebuild=1 - fi + echo "-- $(date) -- requested no snap rebuild" + need_rebuild=0 + + # check if we have any snaps built at all + built_snaps=(built-snap/snapd_*.snap.keep) + if (("${#built_snaps[@]}" > 0)); then + echo "-- $(date) -- found prebuilt snapd snaps:" + for s in "${built_snaps[@]}"; do + echo "-- $s" + done + else + echo "-- $(date) -- no prebuilt snaps found" + need_rebuild=1 + fi fi if [ "$need_rebuild" = 1 ]; then - echo "-- $(date) -- rebuilding snapd snap" - ./tests/build-test-snapd-snap - echo "-- $(date) -- snapd snap rebuild complete" + echo "-- $(date) -- rebuilding snapd snap" + ./tests/build-test-snapd-snap + echo "-- $(date) -- snapd snap rebuild complete" fi -# Run spread -SPREAD_USE_PREBUILT_SNAPD_SNAP=true exec spread "$@" +if [ -z "$SPREAD_TAG_FEATURES" ]; then + SPREAD_USE_PREBUILT_SNAPD_SNAP=true exec spread "$@" +else + WRITE_DIR="/tmp/features" + RUN_TESTS=("$@") + NUM_ATTEMPTS=${NUM_ATTEMPTS:-1} + export SPREAD_USE_PREBUILT_SNAPD_SNAP=true + mkdir -p "$WRITE_DIR" + for i in $(seq 1 "$NUM_ATTEMPTS"); do + + spread -artifacts "${WRITE_DIR}"/features-artifacts -no-debug-output "${RUN_TESTS[@]}" | tee "${WRITE_DIR}/spread-logs.txt" + + if [ -f "$WRITE_DIR"/spread-logs.txt ]; then + ./tests/lib/external/snapd-testing-tools/utils/log-parser "${WRITE_DIR}"/spread-logs.txt --output "${WRITE_DIR}"/spread-results.json + ./tests/lib/external/snapd-testing-tools/utils/log-analyzer list-reexecute-tasks "${RUN_TESTS[@]}" "${WRITE_DIR}"/spread-results.json >"${WRITE_DIR}"/failed-tests.txt + else + touch "${WRITE_DIR}/failed-tests.txt" + fi + + featarg="" + for feature in ${SPREAD_TAG_FEATURES}; do + featarg+="-f $feature " + done + featdir="${WRITE_DIR}/extracted-tags" + mkdir -p "$featdir" + for dir in "${WRITE_DIR}/feature-tags"/*/; do + if [ -f "${dir}/journal.txt" ] && [ -f "${dir}/state.json" ]; then + #shellcheck disable=SC2086 + ./tests/lib/tools/feature_extractor.py \ + $featarg \ + --journal "${dir}/journal.txt" \ + --state 
"${dir}/state.json" \ + --output "$featdir/$(basename "${dir}")" + fi + done + + ./tests/lib/compose-features.py \ + --dir ${featdir} \ + --output ${WRITE_DIR}/composed-feature-tags \ + --failed-tests "$(cat ${WRITE_DIR}/failed-tests.txt)" \ + --run-attempt "${i}" + + if [ ! -s "${WRITE_DIR}/failed-tests.txt" ]; then + break + fi + + mapfile RUN_TESTS <"${WRITE_DIR}"/failed-tests.txt + done + + ./tests/lib/compose-features.py \ + --dir ${WRITE_DIR}/composed-feature-tags \ + --output ${WRITE_DIR}/final-feature-tags \ + --replace-old-runs + + echo "Your feature tags can be found in $WRITE_DIR/final-feature-tags" +fi diff --git a/tests/lib/featcomposer.py b/tests/lib/featcomposer.py new file mode 100755 index 00000000000..1ad27d55e82 --- /dev/null +++ b/tests/lib/featcomposer.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 + +import argparse +from collections import namedtuple +import json +import os +import shutil +from typing import Any +import features + + +SpreadTaskNames = namedtuple('SpreadTaskNames', ['original', 'suite', 'task', 'variant']) + + +def _parse_file_name(file_name: str) -> SpreadTaskNames: + ''' + Given a file name in the format with double slashes ::suite--path--task:variant + and optionally an extension, it returns the original name, the suite name, the task name, + and the variant name. So in the example, it returns: + - original_name = ::suite/path/task:variant + - suite_name = suite/path + - task_name = task + - variant_name = variant + + :param file_name: The file name to parse + :returns: A namedtuple with the original name, the suite name, the task name and the variant name. If variant is not present, it returns None. 
+ ''' + file_name = os.path.splitext(file_name)[0] + original_name = file_name.replace('--', '/') + task = ':'.join(original_name.split(':')[2:]) + suite_name = '/'.join(task.split('/')[:-1]) + task_name = task.split('/')[-1] + variant_name = '' + if task_name.count(':') == 1: + variant_name = task_name.split(':')[1] + task_name = task_name.split(':')[0] + return SpreadTaskNames(original_name, suite_name, task_name, variant_name) + + +def _compose_test(dir: str, file: str, failed_tests: str) -> features.TaskFeatures: + ''' + Creates a dictionary with the features of a test and test information. + The features are read from the file and the test information is extracted from the file name. + + :param dir: The directory where the file is located + :param file: The file name + :param failed_tests: String containing the names of failing tests in any format + :returns: A dictionary with test information and features + ''' + with open(os.path.join(dir, file), 'r', encoding='utf-8') as f: + original, suite_name, result_name, variant_name = _parse_file_name( + file) + task_features = features.TaskFeatures( + suite=suite_name, + task_name=result_name, + variant=variant_name, + success=original not in failed_tests + ) + task_features.update(json.loads(f.read())) + return task_features + + +def _compose_env_variables(env_variables: list[str]) -> list[features.EnvVariables]: + ''' + Given environment variables in the form of a comma-separated list of key=value, + it creates a list of dictionaries of [{"name": , "value": }...] 
+ + :param env_variables: a comma-seprated list of key=value environment variables + :returns: A list of dictionaries + ''' + composed = [] + for env in env_variables: + name, sep, value = env.partition('=') + if sep != '=': + raise ValueError("Not a key=value pair {}".format(env)) + composed.append(features.EnvVariables( + name=name.strip(), value=value.strip())) + return composed + + +def compose_system(dir: str, system: str, failed_tests: str = '', env_variables: list[str] = [], scenarios: list[str] = []) -> features.SystemFeatures: + ''' + Given a containing directory, a system-identifying string, and other information + about failed tests, environment variables, and scenarios, it creates a dictionary + containing the feature information found in the files contained in the directory + for that system. + + :param dir: Directory that contains feature-tagging files + :param system: Identifying string to select only files with that string + :param failed_tests: String containing the names of failing tests in any format + :param env_variables: Comma-separated string of key=value environment variables + :param scenarios: Comma-separated string of scenario names + :returns: Dictionary containing all tests and tests information for the system + ''' + files = [file for file in os.listdir( + dir) if system in file and file.count(':') >= 2] + return features.SystemFeatures( + schema_version='0.0.0', + system=system, + scenarios=[scenario.strip() + for scenario in scenarios] if scenarios else [], + env_variables=_compose_env_variables(env_variables), + tests=[_compose_test(dir, file, failed_tests) for file in files] + ) + + +def get_system_list(dir: str) -> set[str]: + ''' + Constructs a list of all systems from the filenames in the specified directory + + :param dir: Directory containing feature-tagging information for tests + :returns: Set of identifying strings for systems + ''' + files = [f for f in os.listdir(dir) + if os.path.isfile(os.path.join(dir, f))] + return 
{':'.join(file.split(':')[:2]) + for file in files if file.count(':') >= 2} + + +def _replace_tests(old_json_file: str, new_json_file: str) -> features.SystemFeatures: + ''' + The new_json_file contains a subset of the tests found in the old_json_file. + This function leaves not-rerun tests untouched, while replacing old test + runs with their rerun counterparts found in new_json_file. The resulting + json in output therefore contains a mix of tests that were not rerun and + the latest version of tests that were rerun. + + :param old_json_file: file path of first run of composed features + :param new_json_file: file path of rerun of composed features + :returns: dictionary that contains the first run data with rerun tests + replaced by the rerun data from the new_json_file + ''' + with open(old_json_file, 'r', encoding='utf-8') as f: + old_json = json.load(f) + with open(new_json_file, 'r', encoding='utf-8') as f: + new_json = json.load(f) + for test in new_json['tests']: + for old_test in old_json['tests']: + if old_test['task_name'] == test['task_name'] and old_test['suite'] == test['suite'] and old_test['variant'] == test['variant']: + old_test.clear() + for key, value in test.items(): + old_test[key] = value + break + return old_json + + +def _get_original_and_rerun_list(filenames: list[str]) -> tuple[list[str], list[str]]: + ''' + Given a list of filenames, gets two lists of rerun information: + the first list contains the first run (of systems that were rerun) + while the second list contains all reruns, sorted from earliest to latest. + + Note: the list of first runs ONLY contains the first run of reruns; + it does not contain systems that had no rerun. 
+ + :param filenames: a list of filenames + :returns: the list of first runs and the list of all reruns + ''' + def wo_ext(x): return os.path.splitext(x)[0] + reruns = [file for file in filenames if not wo_ext(file).endswith('_1')] + originals = [file for file in filenames + if wo_ext(file).endswith('_1') and + any(rerun for rerun in reruns if rerun.startswith(wo_ext(file)[:-2]))] + reruns.sort(key=lambda x: int(wo_ext(x).split('_')[-1])) + return originals, reruns + + +def _get_name_without_run_number(test: str) -> str: + ''' + Given a name like _ (optionally with extension), + returns . If the name doesn't end with _, then + it will return the original name. + ''' + test_split = os.path.splitext(test)[0].split('_') + if test_split[-1].isdigit(): + return '_'.join(test_split[:-1]) + return test + + +def replace_old_runs(dir: str, output_dir: str) -> None: + ''' + Given the directory in input (dir) that contains a set of files of original + run data together with rerun data, this populates the specified output_dir + with a consolidated set of composed features, one per system. An original + composed features file is a file that ends in _1.json. A rerun composed + features file is a file that ends in _.json where is greater + than 1. 
The numbering is automatically generated when the compose features + script was called with the --run-attempt + + + :param dir: directory containing composed feature files with varying run + attempt numbers + :param output_dir: directory where to write the consolidated composed features + ''' + os.makedirs(output_dir) + filenames = [f for f in os.listdir( + dir) if os.path.isfile(os.path.join(dir, f))] + originals, reruns = _get_original_and_rerun_list(filenames) + for rerun in reruns: + result_name = _get_name_without_run_number(rerun) + original = list( + filter(lambda x: x.startswith(result_name), originals)) + if len(original) != 1: + raise RuntimeError( + f'The rerun {rerun} does not have a corresponding original run') + tests = _replace_tests(os.path.join( + dir, original[0]), os.path.join(dir, rerun)) + with open(os.path.join(output_dir, result_name + '.json'), 'w', encoding='utf-8') as f: + f.write(json.dumps(tests)) + + # Search for system test results that had no reruns and + # simply copy their result file to the output folder + for file in filenames: + if file not in originals and file not in reruns: + shutil.copyfile(os.path.join(dir, file), + os.path.join(output_dir, _get_name_without_run_number(file) + '.json')) + + +def run_attempt_type(value: Any) -> Any: + if not str(value).isdigit() or int(value) <= 0: + raise argparse.ArgumentTypeError( + f'{value} is invalid. Run attempts are integers and start at 1') + return int(value) + + +if __name__ == '__main__': + description = ''' + Can be run in two modes: composed feature generation or composed feature consolidation + + Composed feature generation mode + + Given a directory containing files with outputs of journal-analzyer.py with filenames + of format ::suite--path--:, it will construct a json + file for each : with feature-tagging information, accompanied with + additional test information.
+ + Composed feature consolidation mode + + Given a directory containing files of pre-composed feature information with filenames like + :_.json, it writes the consolidated feature information in a + new directory (specified with the --output flag) where the latest rerun data replaces the old. + So if a file contains one test that was later rerun, the new consolidated file will contain + unaltered content from the original run except for the one test rerun that will replace + the old. + ''' + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('-d', '--dir', type=str, required=True, + help='Path to the folder containing json files') + parser.add_argument('-o', '--output', type=str, + help='Output directory', required=True) + parser.add_argument('-s', '--scenarios', type=str, nargs='*', + help='List of scenarios', default='') + parser.add_argument('-e', '--env-variables', type=str, nargs='*', + help='List of environment variables as key=value', default='') + parser.add_argument('-f', '--failed-tests', type=str, + help='List of failed tests', default='') + parser.add_argument('--run-attempt', type=run_attempt_type, help=''' + Run attempt number of the json files contained in the folder [1,). + Only needed when rerunning spread for failed tests. 
When specified, will append the run attempt + number on the filename, which will then be used when running this script with the --replace-old-runs + flag to determine replacement order''') + parser.add_argument('-r', '--replace-old-runs', action='store_true', + help='When set, will process pre-composed runs and consolidate them into the output dir') + args = parser.parse_args() + + if args.replace_old_runs: + replace_old_runs(args.dir, args.output) + exit(0) + + attempt = '' + if args.run_attempt: + attempt = '_%s' % args.run_attempt + os.makedirs(args.output, exist_ok=True) + systems = get_system_list(args.dir) + for system in systems: + composed = compose_system(dir=args.dir, system=system, + failed_tests=args.failed_tests, env_variables=args.env_variables) + with open(os.path.join(args.output, system + attempt + '.json'), 'w', encoding='utf-8') as f: + json.dump(composed, f) diff --git a/tests/lib/features.py b/tests/lib/features.py new file mode 100644 index 00000000000..4c8afa5b58c --- /dev/null +++ b/tests/lib/features.py @@ -0,0 +1,69 @@ + +''' +Dictionaries to specify structure of feature logs +''' + +from enum import Enum +from typing import TypedDict + + +class Cmd(TypedDict): + cmd: str + + +class Endpoint(TypedDict): + method: str + path: str + action: str + + +class Interface(TypedDict): + name: str + plug_snap_type: str + slot_snap_type: str + + +class Status(str, Enum): + done = "done" + undone = "undone" + error = "error" + + +class Task(TypedDict): + kind: str + snap_type: str + last_status: Status + + +class Change(TypedDict): + kind: str + snap_type: str + + +class Ensure(TypedDict): + functions: list[str] + + +class EnvVariables(TypedDict): + name: str + value: str + +class TaskFeatures(TypedDict): + suite: str + task_name: str + variant: str + success: bool + cmds: list[Cmd] + endpoints: list[Endpoint] + interfaces: list[Interface] + tasks: list[Task] + changes: list[Change] + ensures: list[Ensure] + + +class SystemFeatures(TypedDict): + 
schema_version: str + system: str + scenarios: list[str] + env_variables: list[EnvVariables] + tests: list[TaskFeatures] diff --git a/tests/lib/spread/rules/feature-tagging.yaml b/tests/lib/spread/rules/feature-tagging.yaml new file mode 100644 index 00000000000..c0f226fd393 --- /dev/null +++ b/tests/lib/spread/rules/feature-tagging.yaml @@ -0,0 +1,20 @@ +# This file is used by spread-filter to determine what spread tests are run +# This will run only spread tests whose task.yaml has been changed +rules: + tests: + from: + - tests/main/.*/task.yaml + - tests/core/.*/task.yaml + - tests/completion/.*/task.yaml + - tests/cross/.*/task.yaml + - tests/regression/.*/task.yaml + - tests/smoke/.*/task.yaml + - tests/unit/.*/task.yaml + - tests/upgrade/.*/task.yaml + - tests/fips/.*/task.yaml + - tests/nested/.*/task.yaml + to: [$SELF] + + rest: + from: [.*] + to: [$NONE] diff --git a/tests/lib/test_featcomposer.py b/tests/lib/test_featcomposer.py new file mode 100644 index 00000000000..467ee27e487 --- /dev/null +++ b/tests/lib/test_featcomposer.py @@ -0,0 +1,104 @@ + +import os +import sys +# To ensure the unit test can be run from any point in the filesystem, +# add parent folder to path to permit relative imports +sys.path.append(os.path.dirname(os.path.abspath(__file__))) + +import featcomposer +from features import * +import json +import tempfile +import unittest + + +class TestCompose(unittest.TestCase): + @staticmethod + def get_features(msg): + features = {} + features['cmds'] = [Cmd(cmd=msg), Cmd(cmd=f'msg msg')] + features['endpoints'] = [Endpoint(method='POST', path=msg)] + features['interfaces'] = [Interface(name=msg)] + features['tasks'] = [ + Task(kind=msg, snap_type='snap', last_status=Status.done)] + features['changes'] = [Change(kind=msg, snap_type='snap')] + return features + + @staticmethod + def get_json(suite, task, variant, success, msg): + features = TestCompose.get_features(msg) + return TaskFeatures( + suite=suite,
task_name=task, + variant=variant, + success=success, + cmds=features['cmds'], + endpoints=features['endpoints'], + interfaces=features['interfaces'], + tasks=features['tasks'], + changes=features['changes'] + ) + + @staticmethod + def write_task(filepath, msg): + with open(filepath, 'w') as f: + json.dump(TestCompose.get_features(msg), f) + + def test_compose(self): + with tempfile.TemporaryDirectory() as tmpdir: + task1variant1 = os.path.join( + tmpdir, 'backend:system:path--to--task1:variant1.json') + task2 = os.path.join(tmpdir, 'backend:system:path--to--task2') + TestCompose.write_task(task1variant1, 'task1variant1') + TestCompose.write_task(task2, 'task2') + systems = featcomposer.get_system_list(tmpdir) + self.assertEqual(1, len(systems)) + composed = featcomposer.compose_system(tmpdir, systems.pop(), + 'backend:system:path/to/task1:variant1 backend:system:another/task2', + ['e = 1 ', 'f = 2 '], ['1 ', ' 2', ' 3']) + expected = SystemFeatures(schema_version='0.0.0', + system='backend:system', + scenarios=['1', '2', '3'], + env_variables=[{'name': 'e', 'value': '1'}, + {'name': 'f', 'value': '2'}], + tests=[TestCompose.get_json('path/to', 'task1', 'variant1', False, 'task1variant1'), + TestCompose.get_json('path/to', 'task2', '', True, 'task2')]) + self.assertDictEqual(expected, composed) + + +class TestReplace(unittest.TestCase): + + def test_replace(self): + with tempfile.TemporaryDirectory() as tmpdir: + original = os.path.join(tmpdir, 'my:system_1') + rerun = os.path.join(tmpdir, 'my:system_2.json') + run_once = os.path.join(tmpdir, 'my:other-system_1.json') + original_json = {'system': 'my:system', 'tests': [{'task_name': 'task1', 'suite': 'my/suite', 'variant': '', 'success': False, 'cmds': [{'cmd': 'original run'}]}, + {'task_name': 'task2', 'suite': 'my/suite', 'variant': '', 'success': True, 'cmds': [{'cmd': 'original run'}]}]} + rerun_json = {'system': 'my:system', 'tests': [ + {'task_name': 'task1', 'suite': 'my/suite', 'variant': '', 'success': 
True, 'cmds': [{'cmd': 'rerun 1'}, {'cmd': 'another'}]}]} + run_once_json = {'system': 'my:other-system', 'tests': [ + {'task_name': 'task', 'suite': 'my/suite', 'variant': 'v1', 'success': True}]} + with open(original, 'w') as f: + json.dump(original_json, f) + with open(rerun, 'w') as f: + json.dump(rerun_json, f) + with open(run_once, 'w') as f: + json.dump(run_once_json, f) + output_dir = 'replaced' + featcomposer.replace_old_runs( + tmpdir, os.path.join(tmpdir, output_dir)) + self.assertEqual( + 2, len(os.listdir(os.path.join(tmpdir, output_dir)))) + with open(os.path.join(tmpdir, output_dir, 'my:system.json'), 'r') as f: + actual = json.load(f) + expected = {'system': 'my:system', 'tests': [{'task_name': 'task1', 'suite': 'my/suite', 'variant': '', 'success': True, 'cmds': [{'cmd': 'rerun 1'}, {'cmd': 'another'}]}, + {'task_name': 'task2', 'suite': 'my/suite', 'variant': '', 'success': True, 'cmds': [{'cmd': 'original run'}]}]} + self.assertDictEqual(expected, actual) + with open(os.path.join(tmpdir, output_dir, 'my:other-system.json'), 'r') as f: + actual = json.load(f) + self.assertDictEqual(run_once_json, actual) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/lib/tools/feature_extractor.py b/tests/lib/tools/feature_extractor.py index eee4e799dcc..a41e696b6e0 100755 --- a/tests/lib/tools/feature_extractor.py +++ b/tests/lib/tools/feature_extractor.py @@ -64,7 +64,7 @@ def get_feature_dictionary(log_file: TextIO, feature_list: list[str], state_json try: state_json = json.load(args.state) - feature_dictionary = get_feature_dictionary(args.journal, args.features, state_json) + feature_dictionary = get_feature_dictionary(args.journal, args.feature, state_json) json.dump(feature_dictionary, open(args.output, "w")) except json.JSONDecodeError: raise RuntimeError("The state.json is not valid json")