diff --git a/.github/actions/run-test/action.yaml b/.github/actions/run-test/action.yaml index dd14b1bf..6a915b36 100644 --- a/.github/actions/run-test/action.yaml +++ b/.github/actions/run-test/action.yaml @@ -58,6 +58,16 @@ inputs: required: false default: "" type: string + tfvars-file: + description: "Optional artifact name containing a tfvars file to pass to pytest" + required: false + default: "" + type: string + architecture: + description: "Target architecture for the test run" + required: false + default: "amd64" + type: string runs: using: "composite" @@ -166,7 +176,7 @@ runs: - id: setup-python name: Setup Python - uses: actions/setup-python@v5.0.0 + uses: actions/setup-python@v5.3.0 with: python-version: "3.10" architecture: x64 @@ -222,6 +232,14 @@ runs: sudo snap install terraform --classic fi + - id: download-tfvars + name: Download tfvars artifact + if: ${{ inputs.tfvars-file != '' }} + uses: actions/download-artifact@v8 + with: + name: ${{ inputs.tfvars-file }} + path: tfvars-artifacts + - id: tests-integration name: Run Integration Tests shell: bash @@ -229,9 +247,34 @@ runs: AZURE_STORAGE_ACCOUNT: ${{ inputs.azure-storage-account }} AZURE_STORAGE_KEY: ${{ inputs.azure-storage-key }} run: | + TFVARS_ARG="" + if [[ -n "${{ inputs.tfvars-file }}" ]]; then + + version=${{ inputs.spark-version }} + major_minor="${version%.*}" # -> 3.5 + arch="${{ inputs.architecture }}" + + TFVARS_PATH="$(find tfvars-artifacts -type f -name "${major_minor}@${arch}.yaml" | head -n1)" + if [[ -z "$TFVARS_PATH" ]]; then + echo "ERROR: no tfvars file found in artifact '${{ inputs.tfvars-file }}'." 
+ find tfvars-artifacts -type f || true + exit 1 + fi + + pip install pyyaml + python3 .github/scripts/charms_promotions.py get-revisions \ + --format yaml --file $TFVARS_PATH --output tfvars + + REVISION_FILE="${major_minor}@${arch}.tfvars.yaml" + + cp $REVISION_FILE python/revisions.tfvars.yaml + + TFVARS_ARG="--tfvars-file revisions.tfvars.yaml" + fi + juju add-model spark-bundle-test juju list-models - cd python && tox run -e ${{ inputs.tox-env }} -- -m '${{ steps.select-tests.outputs.mark_expression }}' --cos-model ${{ inputs.cos-model }} --spark-version ${{ inputs.spark-version }} --storage-backend ${{ inputs.storage-backend }} --model spark-bundle-test ${{ inputs.pytest-args }} + cd python && tox run -e ${{ inputs.tox-env }} -- -m '${{ steps.select-tests.outputs.mark_expression }}' --cos-model ${{ inputs.cos-model }} --spark-version ${{ inputs.spark-version }} --storage-backend ${{ inputs.storage-backend }} --model spark-bundle-test ${{ inputs.pytest-args }} $TFVARS_ARG echo "TEST_EXIT_CODE=$?" >> $GITHUB_ENV - id: collect-logs @@ -297,4 +340,3 @@ runs: name: integration-results-${{ inputs.tox-env }}-${{ inputs.k8s-distribution }}-${{ inputs.k8s-version }}-${{ inputs.spark-version }}-${{ inputs.storage-backend }}-${{ inputs.cos-model }}-${{ inputs.juju-agent-version }} path: test-result.json - diff --git a/.github/scripts/charms_promotions.py b/.github/scripts/charms_promotions.py new file mode 100644 index 00000000..964542ed --- /dev/null +++ b/.github/scripts/charms_promotions.py @@ -0,0 +1,725 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+"""Utilities for parsing charm status and promoting charm revisions.""" + +import json +import re +import shlex +import subprocess +import sys +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Callable + +import yaml + +RISKS = {"edge": 1, "beta": 2, "candidate": 3, "stable": 4} + + +class Charm: + """Representation of a charm with helpers to query status and promote revisions.""" + + def __init__( + self, + name: str, + revision: int | None = None, + channel: str | None = None, + architecture: str | None = None, + ): + self.name = name + self.revision = revision + self.channel = channel + self.architecture = architecture + + self._status: dict | None = None + + def __str__(self): + """Return a readable charm representation.""" + return f"{self.name}:{self.channel}({self.revision})" + + def __repr__(self): + """Return the canonical debug representation.""" + return self.__str__() + + def to_dict(self) -> dict: + """Return a dictionary representation of the charm.""" + return { + k: v + for k, v in { + "name": self.name, + "revision": self.revision, + "channel": self.channel, + "architecture": self.architecture, + }.items() + if v is not None + } + + @staticmethod + def from_dict(data: dict) -> "Charm": + """Create a Charm instance from a dictionary.""" + return Charm( + name=data["name"], + revision=data.get("revision"), + channel=data.get("channel"), + architecture=data.get("architecture"), + ) + + def get_status( + self, + channel: str | None = None, + revision: int | None = None, + architecture: str | None = None, + ): + """Return matching status entries for this charm.""" + if not channel: + channel = self.channel + if not revision: + revision = self.revision + if not architecture: + architecture = self.architecture + + if not self._status: + result = subprocess.run( + ["charmcraft", "status", self.name, "--format", "json"], + capture_output=True, + ) + + if result.returncode != 0: + err_output = ( + 
(result.stderr or b"").decode("utf-8", errors="replace").strip() + ) + std_output = ( + (result.stdout or b"").decode("utf-8", errors="replace").strip() + ) + if ( + "permission-required" in std_output + or "permission-required" in err_output + ): + raise PermissionError(self.name) + else: + raise RuntimeError( + self.name, shlex.join(result.args), err_output or std_output + ) + + self._status = json.loads(result.stdout.decode("utf-8")) + + items = [] + for track in self._status: + for mapping in track["mappings"]: + base = mapping["base"] + if not base: + continue + + for release in mapping["releases"]: + if channel is not None and channel != release["channel"]: + continue + if revision is not None and revision != release["revision"]: + continue + if ( + architecture is not None + and architecture != base["architecture"] + ): + continue + items.append({"base": base} | release) + + return items + + def resolve_architecture(self) -> "Charm": + """Resolve a single architecture for a charm/channel/revision tuple.""" + if self.architecture is not None: + print(f"INFO: Using provided architecture {self.architecture}") + return self + + items = self.get_status(channel=self.channel, revision=self.revision) + + if not items: + raise ValueError( + f"No architecture found for charm={self.name}, channel={self.channel}, revision={self.revision}" + ) + + if len(items) > 1: + architectures = sorted(item["base"]["architecture"] for item in items) + raise ValueError( + "Multiple architectures found for " + f"charm={self.name}, channel={self.channel}, revision={self.revision}: {architectures}" + ) + + architecture = items[0]["base"]["architecture"] + + if not isinstance(architecture, str): + raise ValueError( + f"Unexpected architecture type for charm={self.name}, channel={self.channel}, revision={self.revision}" + ) + + self.architecture = architecture + return self + + def resolve_revision(self) -> "Charm": + """Resolve a single revision for a charm/channel/architecture tuple.""" 
+ if self.revision is not None: + print(f"INFO: Using provided revision {self.revision}") + return self + + items = self.get_status(channel=self.channel, architecture=self.architecture) + + if not items: + raise ValueError( + f"No revision found for charm={self.name}, channel={self.channel}, architecture={self.architecture}" + ) + + if len(items) > 1: + revisions = sorted(item["revision"] for item in items) + raise ValueError( + "Multiple revisions found for " + f"charm={self.name}, channel={self.channel}, architecture={self.architecture}: {revisions}" + ) + + revision = items[0]["revision"] + + if not isinstance(revision, int): + raise ValueError( + f"Unexpected revision type for charm={self.name}, channel={self.channel}, architecture={self.architecture}" + ) + + self.revision = revision + return self + + def promote_version(self, risk: str, dry_run: bool = True): + """Promote a charm revision to a higher-risk channel.""" + if risk not in RISKS.keys(): + raise ValueError("The risk is not recognized") + + if not self.channel: + raise ValueError( + "channel field is not specified, that is required for promotion" + ) + + items = self.get_status(revision=self.revision, channel=self.channel) + + if len(items) > 1: + raise ValueError( + f"Multiple tracks match the provide revision and channel combination: {items}" + ) + + if len(items) == 0: + raise ValueError(f"No match found for charm {self}") + + item = items[0] + + _channel = Path(item["channel"]) + risk_from = str(_channel.name) + + if risk_from not in RISKS.keys(): + raise ValueError("The revision does not belong to a risk channel") + + if RISKS[risk] <= RISKS[risk_from]: + raise ValueError("A charm revision cannot be promoted to a lower risk.") + + channel_to = str(_channel.parent / risk) + + cmds = [ + "charmcraft", + "release", + self.name, + f"--channel={channel_to}", + f"--revision={self.revision}", + ] + [ + f"--resource={resource['name']}:{resource['revision']}" + for resource in item["resources"] + ] + + 
if dry_run: + return "INFO: (dry-run mode) " + shlex.join(cmds) + + return subprocess.check_output(cmds).decode("utf-8") + + +class Format(str, Enum): + """Supported status input formats.""" + + TEXT = "text" + YAML = "yaml" + + +@dataclass +class Bundle: + """Collection of charms parsed from status content.""" + + charms: list[Charm] + + @classmethod + def from_status(cls, content: str | Path, format: Format | str = Format.TEXT): + """Parse status content into a Bundle.""" + if isinstance(content, Path): + content = content.read_text(encoding="utf-8") + + parsers = {Format.TEXT: TextParser, Format.YAML: YAMLParser} + normalized_format = Format(format) + + return parsers[normalized_format].parse(content) + + def filter(self, condition: Callable[[Charm], bool]) -> "Bundle": + """Return a new Bundle containing only charms that satisfy the condition.""" + return Bundle(charms=[charm for charm in self.charms if condition(charm)]) + + def exclude(self, names: set[str]) -> "Bundle": + """Return a new Bundle excluding charms whose name is in the provided set.""" + return self.filter(lambda charm: charm.name not in names) + + def to_status(self, file: Path, format: Format | str = Format.YAML): + """Serialize the Bundle to a status format.""" + # This method is not implemented as it's not required for the current use case, + # but it could be implemented if needed in the future. + if format == Format.TEXT: + raise NotImplementedError( + "Bundle.to_status with text format is not implemented yet." 
+ ) + + remapping_keys = { + "channel": "charm-channel", + "revision": "charm-rev", + "architecture": "charm-arch", + "name": "charm-name", + } + + def remap_charm_dict(charm_dict: dict) -> dict: + return {remapping_keys.get(k, k): v for k, v in charm_dict.items()} + + with file.open("w", encoding="utf-8") as fid: + yaml.dump( + { + "applications": { + charm.name: remap_charm_dict(charm.to_dict()) + for charm in self.charms + } + }, + fid, + ) + + def to_tfvars(self, filename: Path, mapping: dict): + """Serialize the Bundle to a Terraform JSON format.""" + with filename.open("w", encoding="utf-8") as fid: + yaml.dump( + { + mapping.get(charm.name, charm.name): charm.revision + for charm in self.charms + }, + fid, + ) + + +@dataclass +class Release: + """Collection of named charm bundles.""" + + bundles: dict[str, Bundle] + + @staticmethod + def bucketize( + charms: list[Charm], key: Callable[[Charm], str] + ) -> dict[str, Bundle]: + """Group charms into bundles using the provided key function.""" + buckets: dict[str, list[Charm]] = {} + for charm in charms: + bucket_key = key(charm) + buckets.setdefault(bucket_key, []).append(charm) + + return {bucket_key: Bundle(charms) for bucket_key, charms in buckets.items()} + + def to_json(self) -> dict: + """Serialize the release bundles to JSON-compatible data.""" + return { + name: [charm.to_dict() for charm in bundle.charms] + for name, bundle in self.bundles.items() + } + + @staticmethod + def from_json(data: dict) -> "Release": + """Build a Release from JSON-compatible serialized bundle data.""" + bundles = { + name: Bundle([Charm.from_dict(charm_data) for charm_data in bundle_data]) + for name, bundle_data in data.items() + } + return Release(bundles=bundles) + + +class YAMLParser: + """Parser for Juju YAML status output.""" + + @staticmethod + def parse(content: str | Path): + """Parse YAML status content and return a Bundle.""" + if isinstance(content, Path): + content = content.read_text(encoding="utf-8") + + data = 
yaml.safe_load(content) + + return Bundle( + [ + Charm( + name=app["charm-name"], + revision=int(app["charm-rev"]), + channel=app.get("charm-channel"), + architecture=app.get("charm-arch"), + ).resolve_architecture() + for _, app in data["applications"].items() + ] + ) + + +class TextParser: + """ + Parse a juju status output to find the charms deployed and their metadata (channel, revision, etc). + + The `parse` method is used to extract the list of charms, returned as a Bundle class. + The reason why simpler parsing methods could not be used, is that the output format of a juju status is + made peculiar by some columns header being aligned right instead of left, e.g. see Rev below: + + App Version Status Scale Charm Channel Rev Address Exposed Message + ... + istio-ingressgateway active 1 istio-gateway 1.28/edge 1594 10.0.165.149 no + ... + + The algorithm parse the header and finds a first guess for the columns start. When parsing each rows, the algorithm updates the width + of each column using the width of the value itself, then treating the remaining chars as the values for the next columns. This algorithm falls short + if the values has multiple white spaces (also in headers). However this only happens in the value of the "Message" column, which is accounted for. 
+ """ + + # The following regex extract the first word that may be preceded by + # leading spaces + word_with_leading_spaces = re.compile(r"^\s*[^\s]+") + + @staticmethod + def extract_first_word(mystring): + """Extract the first token, preserving leading spaces in the match.""" + m = TextParser.word_with_leading_spaces.match(mystring) + if m: + return mystring[m.start() : m.end()] + return "" + + @staticmethod + def parse_line(line, indices): + """Split a line using fixed-width index ranges.""" + return [line[start:end].strip() for start, end in indices] + + @staticmethod + def parse(content: str | Path): + """Parse text status output and return a Bundle.""" + if isinstance(content, Path): + content = content.read_text(encoding="utf-8") + + lines = content.splitlines() + + white_spaces = re.compile(r"\s+\s+") + + app_header_index = next( + ( + index + for index, line in enumerate(lines) + if line.strip().startswith("App") + and "Charm" in line + and "Channel" in line + ), + None, + ) + + if app_header_index is None: + raise ValueError("Could not locate applications table in status text") + + table_lines = [] + for line in lines[app_header_index:]: + if table_lines and (not line.strip() or line.strip().startswith("Unit")): + break + table_lines.append(line) + + if len(table_lines) < 2: + raise ValueError("Applications table is empty in status text") + + # Get header + header = table_lines[0] + + # First guess of width based on headers + ends = [s.end() for s in white_spaces.finditer(header)] + indices = list(zip([0] + ends, ends + [-1], strict=False)) + + # Columns names + columns = TextParser.parse_line(header, indices) + + # Second guess of width based on maximum width of values + # This is due to the fact that some columns the text extends to before the start + # of the columns header (text aligned right) + widths = [len(column) for column in columns] + for line in table_lines[1:]: + widths = list( + map( + max, + zip( + widths, + [ + 
len(TextParser.extract_first_word(line[start:end])) + for start, end in indices + ], + strict=False, + ), + ) + ) + + # New guess + ends = [ + start + width for (start, _), width in zip(indices, widths, strict=False) + ] + + indices = list(zip([0] + ends[:-1], ends[:-1] + [-1], strict=False)) + + data = [ + dict(zip(columns, TextParser.parse_line(line, indices), strict=False)) + for line in table_lines[1:] + if line.strip() + ] + + return Bundle( + [ + Charm( + item["Charm"], int(item["Rev"]), item["Channel"] + ).resolve_architecture() + for item in data + if item["Charm"] + ] + ) + + +@dataclass(frozen=True) +class CharmSpec: + """Charm specification for a release.""" + + name: str + channel: str + revision: int | None = None + + +@dataclass(frozen=True) +class Specs: + """Top-level specs model.""" + + architectures: list[str] + releases: dict[str, list[CharmSpec]] + + @classmethod + def parse(cls, content: str | Path) -> "Specs": + """Load and validate specs YAML, returning a typed Specs object.""" + if isinstance(content, Path): + content = content.read_text(encoding="utf-8") + + data = yaml.safe_load(content) + + if not isinstance(data, dict): + raise ValueError("Invalid specs format: root must be a mapping") + + architectures = data.get("architectures") + releases = data.get("releases") + + if not isinstance(architectures, list) or not all( + isinstance(a, str) for a in architectures + ): + raise ValueError( + "Invalid specs format: architectures must be a list of strings" + ) + + if not isinstance(releases, dict): + raise ValueError("Invalid specs format: releases must be a mapping") + + parsed_releases: dict[str, list[CharmSpec]] = {} + for release, charms in releases.items(): + if not isinstance(charms, list): + raise ValueError( + f"Invalid specs format: releases.{release} must be a list" + ) + + parsed_entries: list[CharmSpec] = [] + for entry in charms: + if not isinstance(entry, dict): + raise ValueError( + f"Invalid specs format: release entry for 
{release} must be a mapping" + ) + + charm_name = entry.get("name") + channel = entry.get("channel") + revision = entry.get("revision") + + if not isinstance(charm_name, str) or not isinstance(channel, str): + raise ValueError( + f"Invalid specs format: release entry for {release} must have string name and channel" + ) + + parsed_entries.append( + CharmSpec(name=charm_name, channel=channel, revision=revision) + ) + + parsed_releases[str(release)] = parsed_entries + + return cls(architectures=architectures, releases=parsed_releases) + + @staticmethod + def _normalize_channel(channel: str, risk: str) -> str: + """Expand channel prefixes like '3.4/' to full channels like '3.4/beta'.""" + if channel.endswith("/"): + return f"{channel}{risk}" + return channel + + def resolve_charms(self, risk: str = "edge") -> Release: + """Build the output mapping keyed by architecture, release, and mapped revision key.""" + releases = {} + + for release, charms in self.releases.items(): + items = [] + for architecture in self.architectures: + for entry in charms: + charm_name = entry.name + channel = self._normalize_channel(entry.channel, risk) + + items.append( + Charm( + name=charm_name, + revision=None, + channel=channel, + architecture=architecture, + ).resolve_revision() + ) + + releases[str(release)] = Release.bucketize( + items, key=lambda charm: charm.architecture + ) + + # Flatten the mapping to have keys of the form "release@architecture" + return Release( + bundles={ + f"{release}@{architercture}": bundle + for release, bundles in releases.items() + for architercture, bundle in bundles.items() + } + ) + + +MAPPING = { + "kyuubi-k8s": "kyuubi_revision", + "spark-history-server-k8s": "history_server_revision", + "spark-integration-hub-k8s": "integration_hub_revision", +} + + +def _load_release( + input_file: Path, format_name: str, risk: str | None = None +) -> Release: + """Load release data from status/spec input based on the selected format.""" + if format_name in ("text", 
"yaml"): + filename = input_file.name.removesuffix(input_file.suffix) + return Release( + bundles={ + filename: Bundle.from_status(input_file, format_name), + } + ) + + if format_name == "spec": + source_risk = risk if risk else "edge" + return Specs.parse(input_file).resolve_charms(risk=source_risk) + + raise ValueError(f"Unsupported format: {format_name}") + + +def _write_revisions(release: Release, output_dir: Path, output_format: str): + """Write one status file per release bundle, using bundle key as filename.""" + output_dir.mkdir(parents=True, exist_ok=True) + + if output_format == "tfvars": + for key, bundle in release.bundles.items(): + bundle.to_tfvars(output_dir / f"{key}.tfvars.yaml", mapping=MAPPING) + elif output_format == "yaml": + for key, bundle in release.bundles.items(): + bundle.to_status(output_dir / f"{key}.yaml") + + +def _run_promote(args) -> int: + """Execute promotion flow.""" + release = _load_release(Path(args.file), args.format, risk=args.promote_to) + + for variant, bundle in release.bundles.items(): + print(f"Processing release {variant} with charms: {bundle.charms}") + for charm in bundle.charms: + try: + print(charm.promote_version(args.promote_to, args.dry_run)) + except ValueError as err: + print( + f"ERROR: skipping {charm.name} due to failure in promotion. 
Reason: {type(err).__name__} - Details: {','.join(err.args)}" + ) + except PermissionError: + print( + f"WARNING: skipping '{charm.name}' due to missing permissions.", + file=sys.stderr, + ) + except RuntimeError as err: + print( + f"WARNING: skipping '{err.args[0]}' after command failure: {err.args[1]}", + file=sys.stderr, + ) + + return 0 + + +def _run_get_revisions(args) -> int: + """Execute revisions generation flow.""" + release = _load_release(Path(args.file), args.format, risk=args.risk) + _write_revisions(release, Path(args.output_dir), args.output) + print(f"INFO: wrote revisions files under {args.output_dir}") + return 0 + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + subparsers = parser.add_subparsers(dest="command", required=True) + + promote_parser = subparsers.add_parser("promote") + promote_action_group = promote_parser.add_mutually_exclusive_group() + promote_action_group.add_argument( + "--dry-run", + dest="dry_run", + action="store_true", + help="Print release commands without executing them (default)", + ) + promote_action_group.add_argument( + "--apply", + dest="dry_run", + action="store_false", + help="Execute charmcraft release commands", + ) + promote_parser.set_defaults(dry_run=True) + promote_parser.add_argument( + "--promote-to", choices=("beta", "candidate", "stable"), required=True + ) + promote_parser.add_argument( + "--format", choices=("text", "yaml", "spec"), required=True + ) + promote_parser.add_argument("--file", required=True) + + get_revisions_parser = subparsers.add_parser("get-revisions") + get_revisions_parser.add_argument( + "--format", choices=("text", "yaml", "spec"), required=True + ) + get_revisions_parser.add_argument("--file", required=True) + get_revisions_parser.add_argument( + "--risk", choices=("edge", "beta", "candidate", "stable"), default="edge" + ) + get_revisions_parser.add_argument( + "--output", choices=("yaml", "tfvars"), default="yaml" + ) + 
get_revisions_parser.add_argument("--output-dir", default=".") + + args = parser.parse_args() + + if args.command == "promote": + raise SystemExit(_run_promote(args)) + + if args.command == "get-revisions": + raise SystemExit(_run_get_revisions(args)) diff --git a/.github/workflows/automatic-promotion.yaml b/.github/workflows/automatic-promotion.yaml new file mode 100644 index 00000000..9e4de846 --- /dev/null +++ b/.github/workflows/automatic-promotion.yaml @@ -0,0 +1,84 @@ +# Copyright 2026 Canonical Ltd. +# See LICENSE file for licensing details. + +name: Automatic promotion of bundle revisions + +on: + pull_request: + workflow_dispatch: + inputs: + + from-risk: + description: "Risk level to fetch revisions from" + required: true + default: edge + type: choice + options: + - edge + - beta + - candidate + + to-risk: + description: "Risk level to promote revisions to" + required: true + default: candidate + type: choice + options: + - beta + - candidate + - stable + + gate: + description: "Workflow to gate promotion on" + required: true + default: ci-tests-full + type: choice + options: + - ci-tests-minimal + - ci-tests-full + + dry-run: + description: "Whether to run promotion in dry-run mode" + required: true + default: true + type: boolean + +jobs: + generate-revisions: + name: Generate charm revisions + uses: ./.github/workflows/generate-charm-revisions.yaml + secrets: inherit + with: + risk: ${{ inputs.from-risk || 'edge' }} + + tests-full: + name: Run CI tests (full) + needs: generate-revisions + if: github.event_name == 'pull_request' || github.event.inputs.gate == 'ci-tests-full' + uses: ./.github/workflows/ci-tests-full.yaml + secrets: inherit + with: + tfvars-file: ${{ needs.generate-revisions.outputs.tfvars-artifact-name }} + + tests-minimal: + name: Run CI tests (minimal) + needs: generate-revisions + if: github.event.inputs.gate == 'ci-tests-minimal' + uses: ./.github/workflows/ci-tests-minimal.yaml + secrets: inherit + with: + tfvars-file: ${{ 
needs.generate-revisions.outputs.tfvars-artifact-name }} + + promotion: + name: Promotion + needs: + - generate-revisions + - tests-full + - tests-minimal + if: ${{ always() && (needs.tests-full.result == 'success' || needs.tests-minimal.result == 'success') }} + uses: ./.github/workflows/promote-charm-revisions.yaml + secrets: inherit + with: + tfvars-artifact-name: ${{ needs.generate-revisions.outputs.tfvars-artifact-name }} + risk: ${{ inputs.to-risk || 'beta'}} + dry-run: ${{ inputs.dry-run != false }} diff --git a/.github/workflows/ci-tests-full.yaml b/.github/workflows/ci-tests-full.yaml index d219a1ee..79b2432d 100644 --- a/.github/workflows/ci-tests-full.yaml +++ b/.github/workflows/ci-tests-full.yaml @@ -8,6 +8,12 @@ on: - cron: "53 2 * * *" # Daily at 02:53 UTC workflow_dispatch: workflow_call: + inputs: + tfvars-file: + description: "Optional artifact name containing tfvars for test runs" + required: false + type: string + default: "" jobs: checks: @@ -29,6 +35,7 @@ jobs: storage-backend: ["s3", "azure_storage"] cos-model: ["cos"] juju-snap-channel: ["3.6/stable"] + juju-agent-version: ["3.6.14"] k8s-distribution: ["k8s", "microk8s"] k8s-version: - "1.28" @@ -40,24 +47,6 @@ jobs: k8s-version: "1.30" - k8s-distribution: "k8s" k8s-version: "1.28" - include: - # Add juju snap channel and agent version to all matrix combinations - - juju-snap-channel: "3.5/stable" - juju-agent-version: "3.5.6" - - juju-snap-channel: "3.6/stable" - juju-agent-version: "3.6.14" - - # Test integration-basic, with Spark 3.4 - - tox-env: integration-basic - spark-version: 3.4.4 - juju-snap-channel: "3.6/stable" - juju-agent-version: "3.6.14" - - # Test integration-basic, with Spark 3.5 - - tox-env: integration-basic - spark-version: 3.5.7 - juju-snap-channel: "3.6/stable" - juju-agent-version: "3.6.14" needs: - checks @@ -79,6 +68,7 @@ jobs: azure-storage-account: ${{ secrets.AZURE_STORAGE_ACCOUNT }} azure-storage-key: ${{ secrets.AZURE_STORAGE_KEY }} pytest-args: ${{ 
matrix.tox-env != 'integration-backup-restore' && '--keep-models' || '' }} + tfvars-file: ${{ inputs.tfvars-file }} aggregate-results: name: Aggregate Results diff --git a/.github/workflows/ci-tests-minimal.yaml b/.github/workflows/ci-tests-minimal.yaml index ba143f5d..7784c116 100644 --- a/.github/workflows/ci-tests-minimal.yaml +++ b/.github/workflows/ci-tests-minimal.yaml @@ -5,6 +5,12 @@ name: Run CI tests on: workflow_call: + inputs: + tfvars-file: + description: "Optional artifact name containing tfvars for test runs" + required: false + type: string + default: "" jobs: checks: @@ -91,6 +97,7 @@ jobs: azure-storage-account: ${{ secrets.AZURE_STORAGE_ACCOUNT }} azure-storage-key: ${{ secrets.AZURE_STORAGE_KEY }} pytest-args: ${{ matrix.tox-env != 'integration-backup-restore' && '--keep-models' || '' }} + tfvars-file: ${{ inputs.tfvars-file }} aggregate-results: name: Aggregate Results diff --git a/.github/workflows/generate-charm-revisions.yaml b/.github/workflows/generate-charm-revisions.yaml new file mode 100644 index 00000000..313d8e8c --- /dev/null +++ b/.github/workflows/generate-charm-revisions.yaml @@ -0,0 +1,73 @@ +# Copyright 2026 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +name: Generate charm revisions + +on: + workflow_call: + inputs: + risk: + description: "Risk level used to resolve charm revisions" + required: true + type: string + outputs: + tfvars-artifact-name: + description: "Name of the artifact containing generated tfvars" + value: ${{ jobs.generate-revisions.outputs.tfvars-artifact-name }} + workflow_dispatch: + inputs: + risk: + description: "Risk level used to resolve charm revisions" + required: true + default: beta + type: choice + options: + - edge + - beta + - candidate + - stable + +jobs: + generate-revisions: + name: Generate revisions + runs-on: ubuntu-22.04 + timeout-minutes: 10 + outputs: + tfvars-artifact-name: ${{ steps.artifact-name.outputs.value }} + steps: + - name: Checkout repo + uses: actions/checkout@v6 + + - name: Setup Python + uses: actions/setup-python@v5.3.0 + with: + python-version: "3.10" + architecture: x64 + + - name: Install charmcraft + run: | + sudo snap install charmcraft --classic + charmcraft version + + - name: Install dependencies + run: pip install pyyaml + + - name: Run get-revisions script + env: + CHARMCRAFT_AUTH: ${{ secrets.CHARMHUB_TOKEN }} + run: | + python3 .github/scripts/charms_promotions.py get-revisions \ + --format spec --file specs.yaml --output yaml \ + --output-dir revisions --risk ${{ inputs.risk }} + + - name: Upload revisions.yaml artifact + id: upload-revisions + uses: actions/upload-artifact@v6 + with: + name: charms-${{ inputs.risk }} + path: revisions + if-no-files-found: error + + - name: Expose artifact name + id: artifact-name + run: echo "value=charms-${{ inputs.risk }}" >> "$GITHUB_OUTPUT" diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml index cd953934..f07a0bb6 100644 --- a/.github/workflows/on_pull_request.yaml +++ b/.github/workflows/on_pull_request.yaml @@ -18,5 +18,6 @@ on: jobs: tests: name: Run CI tests on a PR + if: false uses: ./.github/workflows/ci-tests-minimal.yaml secrets: inherit \ No newline at 
end of file diff --git a/.github/workflows/promote-charm-revisions.yaml b/.github/workflows/promote-charm-revisions.yaml new file mode 100644 index 00000000..77fd1500 --- /dev/null +++ b/.github/workflows/promote-charm-revisions.yaml @@ -0,0 +1,64 @@ +# Copyright 2026 Canonical Ltd. +# See LICENSE file for licensing details. + +name: Promote charm revisions + +on: + workflow_call: + inputs: + tfvars-artifact-name: + description: "Name of the artifact containing generated tfvars" + required: true + type: string + risk: + description: "Risk level to promote revisions to" + required: true + type: string + dry-run: + description: "Whether to run promotion in dry-run mode" + required: true + type: boolean + +jobs: + promotion: + name: Promotion + runs-on: ubuntu-22.04 + steps: + - name: Checkout repo + uses: actions/checkout@v6 + + - id: setup-python + name: Setup Python + uses: actions/setup-python@v5.3.0 + with: + python-version: "3.10" + architecture: x64 + + - id: download-tfvars + name: Download tfvars artifact + uses: actions/download-artifact@v8 + with: + name: ${{ inputs.tfvars-artifact-name }} + path: tfvars-artifacts + + - id: run-promotion + name: Run promotion + shell: bash + env: + CHARMCRAFT_AUTH: ${{ secrets.CHARMHUB_TOKEN }} + run: | + pip install pyyaml + sudo snap install charmcraft --classic + + PROMOTION_MODE="--dry-run" + if [[ "${{ inputs.dry-run }}" == "false" ]]; then + PROMOTION_MODE="--apply" + fi + + REVISION_FILES=$(find tfvars-artifacts -type f -name "*.yaml") + for REVISION_FILE in $REVISION_FILES; do + echo "Processing $REVISION_FILE" + python3 .github/scripts/charms_promotions.py promote \ + --format yaml --promote-to "${{ inputs.risk }}" "$PROMOTION_MODE" \ + --file "$REVISION_FILE" + done diff --git a/python/tests/integration/conftest.py b/python/tests/integration/conftest.py index 3717c100..c5108f7e 100644 --- a/python/tests/integration/conftest.py +++ b/python/tests/integration/conftest.py @@ -4,6 +4,7 @@ from __future__ import 
annotations
 
 import base64
+import json
 import logging
 import os
 import shutil
@@ -19,6 +20,7 @@
 
 import jubilant
 import pytest
+import yaml
 from dotenv import load_dotenv
 from spark8t.domain import PropertyFile
 from tenacity import retry, stop_after_attempt
@@ -119,6 +121,12 @@ def pytest_addoption(parser):
         help="This, together with the `--model` parameter, ensures that all functions "
         "marked with the` skip_if_deployed` tag are skipped.",
     )
+    parser.addoption(
+        "--tfvars-file",
+        type=str,
+        default=None,
+        help="Path to a JSON/YAML file containing Terraform variables to pass to the bundle deployment",
+    )
 
 
 def determine_scope(fixture_name, config):
@@ -194,6 +202,36 @@ def test_uuid(request) -> str:
     return request.config.getoption("--uuid") or str(uuid.uuid4())
 
 
+@pytest.fixture(scope="module")
+def tfvars(request) -> dict:
+    """External Terraform variables loaded from a file."""
+    tfvars_file = request.config.getoption("--tfvars-file")
+
+    if not tfvars_file:
+        return {}
+
+    file_path = Path(tfvars_file)
+    if not file_path.exists():
+        raise FileNotFoundError(f"Terraform variables file not found: {tfvars_file}")
+
+    # Load JSON or YAML file
+    with file_path.open("r", encoding="utf-8") as f:
+        try:
+            if file_path.suffix.lower() == ".json":
+                data = json.load(f)
+            else:
+                data = yaml.safe_load(f)
+        except (json.JSONDecodeError, yaml.YAMLError) as err:
+            raise ValueError(
+                f"Failed to parse Terraform variables file: {err}"
+            ) from err
+
+    if not isinstance(data, dict):
+        raise ValueError("Terraform variables file must contain a JSON/YAML mapping")
+
+    return data
+
+
 @pytest.fixture(scope="module")
 def image_properties(spark_image):
     return PropertyFile(
@@ -410,6 +448,7 @@ def spark_bundle(
     object_storage,
     admin_password,
     private_key,
+    tfvars,
 ):
     """Deploy the Spark K8s bundle using Terraform."""
     short_version = ".".join(spark_version.split(".")[:2])
@@ -432,6 +471,9 @@
         "admin_password": admin_password,
         "tls_private_key": private_key,
     }
+    # Merge
external Terraform variables
+    base_vars.update(tfvars)
+
     cos_vars = {"cos_model_uuid": cos} if cos else {}
 
     if storage_backend == "azure_storage":
diff --git a/python/tests/integration/resources/main.tf b/python/tests/integration/resources/main.tf
index 5e4f39fd..fd949c45 100644
--- a/python/tests/integration/resources/main.tf
+++ b/python/tests/integration/resources/main.tf
@@ -67,6 +67,24 @@ variable "tls_private_key" {
   default     = null
 }
 
+variable "history_server_revision" {
+  description = "Charm revision for spark-history-server-k8s"
+  type        = number
+  default     = null
+}
+
+variable "kyuubi_revision" {
+  description = "Charm revision for kyuubi-k8s"
+  type        = number
+  default     = null
+}
+
+variable "integration_hub_revision" {
+  description = "Charm revision for spark-integration-hub-k8s"
+  type        = number
+  default     = null
+}
+
 module "cos" {
   count = var.cos_model_uuid == null ? 0 : 1
   # TODO: Pin to tag once available
@@ -91,7 +109,10 @@ module "spark" {
   admin_password           = var.admin_password
   azure_storage_config     = var.azure_storage_config
   azure_storage_secret_key = var.azure_storage_secret_key
+  history_server_revision  = var.history_server_revision
+  integration_hub_revision = var.integration_hub_revision
   kyuubi_config            = var.kyuubi_config
+  kyuubi_revision          = var.kyuubi_revision
   kyuubi_users_size        = "500M"
   metastore_size           = "500M"
   s3_config                = var.s3_config
diff --git a/specs.yaml b/specs.yaml
new file mode 100644
index 00000000..daeead10
--- /dev/null
+++ b/specs.yaml
@@ -0,0 +1,18 @@
+architectures:
+  - amd64
+  - arm64
+releases:
+  3.4:
+    - name: kyuubi-k8s
+      channel: 3.4/
+    - name: spark-history-server-k8s
+      channel: 3/
+    - name: spark-integration-hub-k8s
+      channel: 3/
+  3.5:
+    - name: kyuubi-k8s
+      channel: 3.5/
+    - name: spark-history-server-k8s
+      channel: 3/
+    - name: spark-integration-hub-k8s
+      channel: 3/
\ No newline at end of file