diff --git a/.github/workflows/conformance.yml b/.github/workflows/conformance.yml new file mode 100644 index 00000000..98975de2 --- /dev/null +++ b/.github/workflows/conformance.yml @@ -0,0 +1,42 @@ +# Copyright 2024 The Update Framework Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License +# +# SPDX-License-Identifier: Apache-2.0 + +name: TUF Conformance + +on: + push: + pull_request: + schedule: + # Weekly run on Wednesday at 06:30 UTC so the conformance report stays fresh. + - cron: '30 6 * * 3' + +jobs: + tuf-conformance: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + + - name: Build tuf-conformance-client + run: go build -o tuf-conformance-client ./cmd/tuf-conformance-client + + - uses: theupdateframework/tuf-conformance@v2 + with: + entrypoint: ./tuf-conformance-client diff --git a/Makefile b/Makefile index a5ff219f..a132603f 100644 --- a/Makefile +++ b/Makefile @@ -48,6 +48,25 @@ build-%: test: GODEBUG=rsa1024min=0 go test -race -covermode atomic ./... +##################### +# conformance section +##################### + +# Build the tuf-conformance client-under-test binary. +.PHONY: build-conformance-client +build-conformance-client: + @echo "Building tuf-conformance-client" + @go build -o tuf-conformance-client ./cmd/tuf-conformance-client + +# Run the tuf-conformance test suite against this implementation. 
+# Requires tuf-conformance to be installed: +# pip install tuf-conformance +# or clone https://github.com/theupdateframework/tuf-conformance and run: +# make dev +.PHONY: conformance +conformance: build-conformance-client + pytest tuf_conformance --entrypoint ./tuf-conformance-client + ##################### # lint section ##################### diff --git a/cmd/tuf-conformance-client/main.go b/cmd/tuf-conformance-client/main.go new file mode 100644 index 00000000..c321c0ab --- /dev/null +++ b/cmd/tuf-conformance-client/main.go @@ -0,0 +1,277 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 + +// tuf-conformance-client is the client-under-test executable required by the +// tuf-conformance test suite (https://github.com/theupdateframework/tuf-conformance). +// +// It implements the three-command CLI protocol described in CLIENT-CLI.md: +// +// init – bootstrap trusted metadata from a root.json file +// refresh – update top-level metadata from the repository +// download – download and verify a target artifact +// +// The tool exits with code 0 on success and code 1 on any failure. 
+package main + +import ( + "fmt" + "os" + "path/filepath" + + stdlog "log" + + "github.com/go-logr/stdr" + "github.com/spf13/cobra" + "github.com/theupdateframework/go-tuf/v2/metadata" + "github.com/theupdateframework/go-tuf/v2/metadata/config" + "github.com/theupdateframework/go-tuf/v2/metadata/updater" +) + +func main() { + if err := newRootCmd().Execute(); err != nil { + os.Exit(1) + } +} + +// flags shared across all sub-commands. +var ( + metadataDir string + metadataURL string + verbose bool +) + +func newRootCmd() *cobra.Command { + root := &cobra.Command{ + Use: "tuf-conformance-client", + Short: "TUF client-under-test for the tuf-conformance test suite", + Long: `tuf-conformance-client implements the client-under-test CLI protocol required +by the tuf-conformance test suite. + +See https://github.com/theupdateframework/tuf-conformance/blob/main/CLIENT-CLI.md`, + SilenceUsage: true, + } + + root.PersistentFlags().StringVar(&metadataDir, "metadata-dir", "", "directory for trusted local metadata (required)") + root.PersistentFlags().StringVar(&metadataURL, "metadata-url", "", "URL of the repository metadata store") + root.PersistentFlags().BoolVar(&verbose, "verbose", false, "enable verbose logging") + + root.AddCommand(newInitCmd()) + root.AddCommand(newRefreshCmd()) + root.AddCommand(newDownloadCmd()) + + return root +} + +// configureLogger sets up the go-tuf logger at an appropriate verbosity level. +func configureLogger(prefix string) { + logger := stdr.New(stdlog.New(os.Stderr, prefix+": ", stdlog.LstdFlags)) + metadata.SetLogger(logger) + if verbose { + stdr.SetVerbosity(5) + } +} + +// newInitCmd returns the `init` sub-command. +// +// Usage: tuf-conformance-client --metadata-dir DIR init TRUSTED_ROOT +// +// It copies the provided root.json file into METADATA_DIR as "root.json" +// without contacting the network. 
+func newInitCmd() *cobra.Command { + return &cobra.Command{ + Use: "init <TRUSTED_ROOT>", + Short: "Bootstrap trusted metadata from a root.json file", + Long: `Initialize the client by copying the provided trusted root.json into +--metadata-dir. No network requests are made during this step.`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + configureLogger("init") + + if metadataDir == "" { + return fmt.Errorf("--metadata-dir is required") + } + + trustedRoot := args[0] + + rootBytes, err := os.ReadFile(trustedRoot) + if err != nil { + return fmt.Errorf("read trusted root %q: %w", trustedRoot, err) + } + + if err := os.MkdirAll(metadataDir, 0750); err != nil { + return fmt.Errorf("create metadata dir %q: %w", metadataDir, err) + } + + dest := filepath.Join(metadataDir, "root.json") + if err := os.WriteFile(dest, rootBytes, 0644); err != nil { + return fmt.Errorf("write root.json to %q: %w", dest, err) + } + + fmt.Fprintln(os.Stderr, "init: trusted root written to", dest) + return nil + }, + } +} + +// newRefreshCmd returns the `refresh` sub-command. +// +// Usage: tuf-conformance-client --metadata-dir DIR --metadata-url URL refresh +// +// It runs the TUF client workflow to update top-level metadata and writes the +// trusted metadata files (root.json, targets.json, snapshot.json, +// timestamp.json) into METADATA_DIR using non-versioned filenames.
+func newRefreshCmd() *cobra.Command { + return &cobra.Command{ + Use: "refresh", + Short: "Update top-level metadata from the repository", + Long: `Fetch and verify the TUF top-level metadata (root, targets, snapshot, +timestamp) from --metadata-url, storing the trusted copies in --metadata-dir +using non-versioned filenames.`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + configureLogger("refresh") + + if metadataDir == "" { + return fmt.Errorf("--metadata-dir is required") + } + if metadataURL == "" { + return fmt.Errorf("--metadata-url is required") + } + + rootBytes, err := os.ReadFile(filepath.Join(metadataDir, "root.json")) + if err != nil { + return fmt.Errorf("read local root.json: %w", err) + } + + cfg, err := config.New(metadataURL, rootBytes) + if err != nil { + return fmt.Errorf("create updater config: %w", err) + } + cfg.LocalMetadataDir = metadataDir + + up, err := updater.New(cfg) + if err != nil { + return fmt.Errorf("create updater: %w", err) + } + + if err := up.Refresh(); err != nil { + return fmt.Errorf("refresh: %w", err) + } + + fmt.Fprintln(os.Stderr, "refresh: metadata updated successfully") + return nil + }, + } +} + +// newDownloadCmd returns the `download` sub-command. +// +// Usage: +// +// tuf-conformance-client \ +// --metadata-dir DIR \ +// --metadata-url URL \ +// --target-name PATH \ +// --target-base-url URL \ +// --target-dir DIR \ +// download +// +// It refreshes top-level metadata, looks up the target, checks the local +// cache, and downloads the artifact into --target-dir if not already present. +func newDownloadCmd() *cobra.Command { + var ( + targetName string + targetBaseURL string + targetDir string + ) + + cmd := &cobra.Command{ + Use: "download", + Short: "Download and verify a target artifact", + Long: `Refresh metadata, then download the artifact identified by --target-name from +--target-base-url and store it in --target-dir. 
+ +If the artifact is already cached with matching hashes it will not be +re-downloaded.`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + configureLogger("download") + + if metadataDir == "" { + return fmt.Errorf("--metadata-dir is required") + } + if metadataURL == "" { + return fmt.Errorf("--metadata-url is required") + } + if targetName == "" { + return fmt.Errorf("--target-name is required") + } + if targetBaseURL == "" { + return fmt.Errorf("--target-base-url is required") + } + if targetDir == "" { + return fmt.Errorf("--target-dir is required") + } + + rootBytes, err := os.ReadFile(filepath.Join(metadataDir, "root.json")) + if err != nil { + return fmt.Errorf("read local root.json: %w", err) + } + + cfg, err := config.New(metadataURL, rootBytes) + if err != nil { + return fmt.Errorf("create updater config: %w", err) + } + cfg.LocalMetadataDir = metadataDir + cfg.LocalTargetsDir = targetDir + cfg.RemoteTargetsURL = targetBaseURL + + up, err := updater.New(cfg) + if err != nil { + return fmt.Errorf("create updater: %w", err) + } + + if err := up.Refresh(); err != nil { + return fmt.Errorf("refresh: %w", err) + } + + info, err := up.GetTargetInfo(targetName) + if err != nil { + return fmt.Errorf("get target info %q: %w", targetName, err) + } + + // Check if the artifact is already cached. 
+ if path, _, err := up.FindCachedTarget(info, ""); err == nil && path != "" { + fmt.Fprintln(os.Stderr, "download: target already cached at", path) + return nil + } + + path, _, err := up.DownloadTarget(info, "", "") + if err != nil { + return fmt.Errorf("download target %q: %w", targetName, err) + } + + fmt.Fprintln(os.Stderr, "download: stored target at", path) + return nil + }, + } + + cmd.Flags().StringVar(&targetName, "target-name", "", "TUF targetpath of the artifact (required)") + cmd.Flags().StringVar(&targetBaseURL, "target-base-url", "", "base URL for the target store (required)") + cmd.Flags().StringVar(&targetDir, "target-dir", "", "directory to store downloaded artifacts (required)") + + return cmd +} diff --git a/internal/testutils/README.md b/internal/testutils/README.md new file mode 100644 index 00000000..6dcdb65d --- /dev/null +++ b/internal/testutils/README.md @@ -0,0 +1,168 @@ +# testutils + +Internal test utilities for the go-tuf project. + +## Directory layout + +``` +internal/testutils/ +├── README.md # this file +├── setup.go # legacy setup helpers (compatibility) +├── helpers/ +│ ├── helpers.go # core test helper functions +│ ├── fuzz.go # fuzz data generation utilities +│ └── updater.go # updater-specific test helpers +├── rsapss/ # RSA-PSS signing utilities +├── signer/ # generic signing test utilities +└── simulator/ + ├── config.go + ├── repository_simulator.go # HTTP-free TUF repository simulator + ├── repository_simulator_setup.go + ├── util.go + ├── builder.go # SimulatorBuilder fluent API + └── test_repository.go # TestRepository — isolated test state +``` + +## helpers package + +### Core utilities (`helpers.go`) + +```go +// File I/O +helpers.WriteTestFile(t, dir, "file.json", data) // writes and returns path +helpers.ReadTestFile(t, path) // reads or fatals + +// JSON +helpers.CompareJSON(t, got, want) // normalise whitespace then compare +helpers.MustMarshal(t, v) // marshal or fatal +helpers.MustUnmarshal[T](t, data) // 
unmarshal or fatal + +// Assertions +helpers.AssertNoError(t, err) // fatal on non-nil error +helpers.AssertErrorContains(t, err, msg) // fatal if err is nil or missing msg + +// Keys +helpers.GenerateTestKeyPair(t) // Ed25519 pub/priv or fatal + +// Miscellaneous +helpers.StripWhitespace(data) // strip ASCII whitespace from bytes +helpers.CreateInvalidJSON() // map of named invalid JSON snippets +``` + +### JSON fixture builders (`helpers.go`) + +These functions do **not** require `*testing.T`, making them safe to use in +fuzz seed setup (`f.Add(...)`) without the `&testing.T{}` anti-pattern: + +```go +helpers.BuildRootJSON() // []byte — minimal valid root.json +helpers.BuildTargetsJSON() // []byte — minimal valid targets.json +helpers.BuildSnapshotJSON() // []byte — minimal valid snapshot.json +helpers.BuildTimestampJSON() // []byte — minimal valid timestamp.json +``` + +`CreateTest{Root,Targets,Snapshot,Timestamp}JSON(t)` are thin `t.Helper()` +wrappers around the builders, kept for backwards compatibility. + +### Fuzz utilities (`fuzz.go`) + +```go +gen := helpers.NewFuzzDataGenerator(seed1, seed2 uint64) + +gen.GenerateRandomString(length int) string +gen.GenerateRandomBytes(n int) []byte +gen.GenerateRandomInt(max int) int +gen.GenerateRandomTime() time.Time +gen.GenerateRandomJSON() []byte +gen.GenerateCorruptedJSON() []byte +gen.GenerateRandomMetadataFields() map[string]any +gen.GenerateRandomSignature() map[string]any +gen.GenerateRandomKey() map[string]any +gen.CreateFuzzTestMetadata(metadataType string) []byte + +// Register standard seeds and run f.Fuzz: +helpers.FuzzMetadataOperations(f, func(data []byte) error { ... 
}) +``` + +### Updater utilities (`updater.go`) + +```go +helpers.ContainsTargetWithPrefix(targets []string, prefix string) bool +helpers.AssertTargetDownloaded(t, targetDir, targetName string) +helpers.AssertTargetNotDownloaded(t, targetDir, targetName string) +``` + +## simulator package + +### TestRepository + +`TestRepository` wraps a `RepositorySimulator` with isolated temporary +directories (created via `t.TempDir()` — auto-cleaned when the test ends). + +```go +repo := simulator.NewTestRepository(t) +defer repo.Cleanup() // no-op; kept for call-site clarity + +// Updater configuration +cfg, err := repo.GetUpdaterConfig() +cfg, err := repo.GetUnsafeUpdaterConfig() + +// Metadata manipulation +repo.PublishRoot() +repo.BumpVersion(role) // root: also calls PublishRoot +repo.SetExpired(role) +repo.SetExpiresAt(role, t) +repo.SetVersion(role, v) +repo.GetVersion(role) int64 +repo.RemoveSigners(role) +repo.RotateKeys(role) +repo.AddTarget(role, content, path) +repo.UpdateSnapshot() +repo.UpdateTimestamp() +repo.EnableComputeHashesAndLength() +repo.DisableComputeHashesAndLength() +repo.ReloadRootBytes() error +repo.WriteRoot(version int) error +repo.SetSnapshotMeta(role string, version int64) +repo.SetTimestampSnapshotMeta(version int64) +repo.PastTime() time.Time + +// Assertions +repo.AssertFilesExist(roles []string) +repo.AssertFilesExact(roles []string) +repo.AssertVersionEquals(role string, expectedVersion int64) +repo.AssertContentEquals(role string, version *int) +``` + +### SimulatorBuilder + +A fluent builder for constructing pre-configured simulators: + +```go +sim := simulator.NewSimulator(). + WithConsistentSnapshot(false). + WithTarget("path/to/file.txt", []byte("content")). + WithExpiredRole(metadata.TIMESTAMP). + WithRootRotations(2). + Build(t) + +repo := simulator.NewTestRepositoryWithBuilder(t, simulator.NewSimulator(). 
+ WithTarget("artifact.txt", []byte("data"))) +``` + +## Running tests + +```bash +# All tests (RSA-1024 compat needed for test fixtures) +GODEBUG=rsa1024min=0 go test -race ./... + +# Specific packages +go test ./metadata/... +go test ./metadata/updater/... + +# Fuzz tests (run for a fixed duration) +go test -fuzz=FuzzRootFromBytes -fuzztime=30s ./metadata/ + +# Benchmarks +go test -bench=. ./metadata/ +``` diff --git a/internal/testutils/helpers/fuzz.go b/internal/testutils/helpers/fuzz.go new file mode 100644 index 00000000..d024b68c --- /dev/null +++ b/internal/testutils/helpers/fuzz.go @@ -0,0 +1,273 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package helpers + +import ( + "encoding/json" + "fmt" + "maps" + "math/rand/v2" + "strings" + "testing" + "time" +) + +// FuzzDataGenerator produces deterministic pseudo-random data for fuzz seeds. +// Use [NewFuzzDataGenerator] to construct one with a fixed seed for +// reproducible seed corpora, or use 0 for a randomly seeded source. +type FuzzDataGenerator struct { + rng *rand.Rand +} + +// NewFuzzDataGenerator returns a generator backed by a PCG source seeded with +// seed1 and seed2. For a single uint64 seed, pass the same value twice. 
+func NewFuzzDataGenerator(seed1, seed2 uint64) *FuzzDataGenerator { + return &FuzzDataGenerator{ + rng: rand.New(rand.NewPCG(seed1, seed2)), + } +} + +// GenerateRandomString returns a random alphanumeric string of the given length. +func (f *FuzzDataGenerator) GenerateRandomString(length int) string { + const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + b := make([]byte, length) + for i := range b { + b[i] = charset[f.rng.IntN(len(charset))] + } + return string(b) +} + +// GenerateRandomBytes returns a slice of n pseudo-random bytes. +func (f *FuzzDataGenerator) GenerateRandomBytes(n int) []byte { + b := make([]byte, n) + for i := range b { + b[i] = byte(f.rng.IntN(256)) + } + return b +} + +// GenerateRandomInt returns a non-negative pseudo-random int in [0, max). +func (f *FuzzDataGenerator) GenerateRandomInt(max int) int { + return f.rng.IntN(max) +} + +// GenerateRandomTime returns a pseudo-random time between 2020 and 2030 (UTC). +func (f *FuzzDataGenerator) GenerateRandomTime() time.Time { + start := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + end := time.Date(2030, 12, 31, 23, 59, 59, 0, time.UTC) + delta := end.Sub(start) + return start.Add(time.Duration(f.rng.Int64N(int64(delta)))) +} + +// GenerateRandomJSON returns a JSON object that resembles TUF metadata but +// with random field values. +func (f *FuzzDataGenerator) GenerateRandomJSON() []byte { + data := map[string]any{ + "signed": map[string]any{ + "_type": f.GenerateRandomString(f.rng.IntN(20) + 1), + "version": f.rng.IntN(1000), + "spec_version": f.GenerateRandomString(10), + "expires": f.GenerateRandomTime().Format(time.RFC3339), + }, + "signatures": []map[string]any{ + { + "keyid": f.GenerateRandomString(64), + "sig": f.GenerateRandomString(128), + }, + }, + } + out, _ := json.Marshal(data) + return out +} + +// GenerateCorruptedJSON returns one of several varieties of intentionally +// malformed JSON, chosen pseudo-randomly. 
+func (f *FuzzDataGenerator) GenerateCorruptedJSON() []byte { + corruptors := []func() []byte{ + // Truncated JSON + func() []byte { + valid := f.GenerateRandomJSON() + if len(valid) > 10 { + return valid[:len(valid)/2] + } + return valid + }, + // Replaced colons with random strings + func() []byte { + return []byte(strings.ReplaceAll(string(f.GenerateRandomJSON()), ":", f.GenerateRandomString(5))) + }, + // Deeply nested object + func() []byte { + depth := f.rng.IntN(100) + 1 + var sb strings.Builder + for i := range depth { + fmt.Fprintf(&sb, `{"level%d":`, i) + } + sb.WriteString(`"value"`) + for range depth { + sb.WriteByte('}') + } + return []byte(sb.String()) + }, + // Very long string + func() []byte { + long := f.GenerateRandomString(f.rng.IntN(10000) + 1000) + return fmt.Appendf(nil, `{"long_string":"%s"}`, long) + }, + // Arbitrary bytes injected into a JSON string + func() []byte { + payload := f.GenerateRandomBytes(50) + prefix := []byte(`{"test":"`) + suffix := []byte(`"}`) + return append(prefix, append(payload, suffix...)...) + }, + } + return corruptors[f.rng.IntN(len(corruptors))]() +} + +// GenerateRandomMetadataFields returns a map of random values for common TUF +// metadata fields. +func (f *FuzzDataGenerator) GenerateRandomMetadataFields() map[string]any { + return map[string]any{ + "version": f.rng.IntN(1_000_000), + "spec_version": f.GenerateRandomString(f.rng.IntN(20) + 1), + "expires": f.GenerateRandomTime().Format(time.RFC3339), + "type": f.GenerateRandomString(f.rng.IntN(20) + 1), + "length": f.rng.IntN(1_000_000), + "hashes": map[string]string{ + "sha256": f.GenerateRandomString(64), + "sha512": f.GenerateRandomString(128), + }, + "keyids": []string{f.GenerateRandomString(64), f.GenerateRandomString(64)}, + "threshold": f.rng.IntN(10) + 1, + "custom": map[string]any{ + "random_field": f.GenerateRandomString(100), + "number": f.rng.IntN(1000), + }, + } +} + +// GenerateRandomSignature returns a random TUF-like signature map. 
+func (f *FuzzDataGenerator) GenerateRandomSignature() map[string]any { + return map[string]any{ + "keyid": f.GenerateRandomString(64), + "sig": f.GenerateRandomString(f.rng.IntN(200) + 50), + } +} + +// GenerateRandomKey returns a random TUF-like key map. +func (f *FuzzDataGenerator) GenerateRandomKey() map[string]any { + keyTypes := []string{"ed25519", "rsa", "ecdsa", "unknown"} + schemes := []string{"ed25519", "rsa-pss-sha256", "ecdsa-sha2-nistp256", "unknown"} + return map[string]any{ + "keytype": keyTypes[f.rng.IntN(len(keyTypes))], + "scheme": schemes[f.rng.IntN(len(schemes))], + "keyval": map[string]any{ + "public": f.GenerateRandomString(f.rng.IntN(500) + 50), + }, + } +} + +// CreateFuzzTestMetadata returns a JSON-encoded TUF metadata structure for +// metadataType ("root", "targets", "snapshot", "timestamp") with random field +// values. Useful as fuzz seed input. +func (f *FuzzDataGenerator) CreateFuzzTestMetadata(metadataType string) []byte { + base := map[string]any{ + "signed": map[string]any{ + "_type": metadataType, + }, + "signatures": []any{f.GenerateRandomSignature()}, + } + + signed := base["signed"].(map[string]any) + maps.Copy(signed, f.GenerateRandomMetadataFields()) + + switch metadataType { + case "root": + signed["keys"] = map[string]any{ + f.GenerateRandomString(64): f.GenerateRandomKey(), + } + signed["roles"] = map[string]any{ + "root": map[string]any{ + "keyids": []string{f.GenerateRandomString(64)}, + "threshold": f.rng.IntN(5) + 1, + }, + } + signed["consistent_snapshot"] = f.rng.IntN(2) == 1 + case "targets": + signed["targets"] = map[string]any{ + f.GenerateRandomString(20): f.GenerateRandomMetadataFields()["hashes"], + } + case "snapshot": + signed["meta"] = map[string]any{ + "targets.json": map[string]any{ + "version": f.rng.IntN(1000), + "hashes": f.GenerateRandomMetadataFields()["hashes"], + "length": f.rng.IntN(10000), + }, + } + case "timestamp": + signed["meta"] = map[string]any{ + "snapshot.json": map[string]any{ + 
"version": f.rng.IntN(1000), + "hashes": f.GenerateRandomMetadataFields()["hashes"], + "length": f.rng.IntN(10000), + }, + } + } + + out, _ := json.Marshal(base) + return out +} + +// FuzzMetadataOperations registers seeds and runs f.Fuzz against operation. +// The operation must never panic regardless of input; errors are acceptable. +// +// Seed data is taken from [BuildRootJSON], [BuildTargetsJSON], +// [BuildSnapshotJSON], and [BuildTimestampJSON] (no *testing.T required) plus +// a selection of corrupted JSON variants. +func FuzzMetadataOperations(f *testing.F, operation func(data []byte) error) { + f.Helper() + + // Add valid metadata as seeds using the builder functions (no *testing.T). + f.Add(BuildRootJSON()) + f.Add(BuildTargetsJSON()) + f.Add(BuildSnapshotJSON()) + f.Add(BuildTimestampJSON()) + + // Add edge cases. + f.Add([]byte("")) + f.Add([]byte("{}")) + f.Add([]byte("null")) + f.Add([]byte("[]")) + + // Add some corrupted-data seeds. + gen := NewFuzzDataGenerator(0xdeadbeef, 0xcafebabe) + for range 10 { + f.Add(gen.GenerateCorruptedJSON()) + } + + f.Fuzz(func(t *testing.T, data []byte) { + defer func() { + if r := recover(); r != nil { + t.Errorf("operation panicked with input %q: %v", data, r) + } + }() + _ = operation(data) + }) +} diff --git a/internal/testutils/helpers/helpers.go b/internal/testutils/helpers/helpers.go new file mode 100644 index 00000000..541a107f --- /dev/null +++ b/internal/testutils/helpers/helpers.go @@ -0,0 +1,299 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +// Package helpers provides composable test utilities for go-tuf tests. +// +// Design principles: +// - All helpers accept *testing.T or *testing.B and call t.Helper() first. +// - Use t.TempDir() instead of custom directory managers; the testing package +// cleans up automatically. +// - No third-party assertion libraries: helpers signal failures via t.Errorf +// and t.Fatalf, matching the standard library style. +// - JSON fixture builders do not depend on *testing.T so they can be used +// in fuzz seed functions without the &testing.T{} anti-pattern. +package helpers + +import ( + "bytes" + "crypto/ed25519" + "crypto/rand" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +// WriteTestFile writes content to a file in the given directory and returns the +// full path. The test fails immediately if the write fails. +func WriteTestFile(t *testing.T, dir, filename string, content []byte) string { + t.Helper() + path := filepath.Join(dir, filename) + if err := os.WriteFile(path, content, 0644); err != nil { + t.Fatalf("WriteTestFile(%q): %v", path, err) + } + return path +} + +// ReadTestFile reads and returns the content of a file. The test fails +// immediately if the read fails. +func ReadTestFile(t *testing.T, path string) []byte { + t.Helper() + data, err := os.ReadFile(path) + if err != nil { + t.Fatalf("ReadTestFile(%q): %v", path, err) + } + return data +} + +// StripWhitespace removes all ASCII whitespace characters from b. +func StripWhitespace(data []byte) []byte { + result := make([]byte, 0, len(data)) + for _, b := range data { + if b != ' ' && b != '\t' && b != '\n' && b != '\r' { + result = append(result, b) + } + } + return result +} + +// CompareJSON asserts that got and want represent the same JSON value, +// normalising away whitespace differences. 
The test is marked failed (but +// not stopped) when they differ. +func CompareJSON(t *testing.T, got, want []byte) { + t.Helper() + + var gotVal, wantVal any + if err := json.Unmarshal(got, &gotVal); err != nil { + t.Fatalf("CompareJSON: unmarshal got: %v", err) + } + if err := json.Unmarshal(want, &wantVal); err != nil { + t.Fatalf("CompareJSON: unmarshal want: %v", err) + } + + gotNorm, err := json.Marshal(gotVal) + if err != nil { + t.Fatalf("CompareJSON: re-marshal got: %v", err) + } + wantNorm, err := json.Marshal(wantVal) + if err != nil { + t.Fatalf("CompareJSON: re-marshal want: %v", err) + } + + if !bytes.Equal(gotNorm, wantNorm) { + t.Errorf("JSON mismatch:\ngot: %s\nwant: %s", gotNorm, wantNorm) + } +} + +// GenerateTestKeyPair generates a fresh Ed25519 key pair. The test fails +// immediately if key generation fails. +func GenerateTestKeyPair(t *testing.T) (ed25519.PublicKey, ed25519.PrivateKey) { + t.Helper() + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + t.Fatalf("GenerateTestKeyPair: %v", err) + } + return pub, priv +} + +// AssertErrorContains fails the test if err is nil or if its message does not +// contain expectedMsg. +func AssertErrorContains(t *testing.T, err error, expectedMsg string) { + t.Helper() + if err == nil { + t.Fatalf("expected error containing %q, got nil", expectedMsg) + } + if !strings.Contains(err.Error(), expectedMsg) { + t.Fatalf("expected error containing %q, got %q", expectedMsg, err.Error()) + } +} + +// AssertNoError fails the test immediately if err is non-nil. +func AssertNoError(t *testing.T, err error) { + t.Helper() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +// MustMarshal marshals v to JSON. The test fails immediately if marshalling +// fails. +func MustMarshal(t *testing.T, v any) []byte { + t.Helper() + data, err := json.Marshal(v) + if err != nil { + t.Fatalf("MustMarshal: %v", err) + } + return data +} + +// MustUnmarshal unmarshals data into a value of type T. 
The test fails +// immediately if unmarshalling fails. +func MustUnmarshal[T any](t *testing.T, data []byte) T { + t.Helper() + var result T + if err := json.Unmarshal(data, &result); err != nil { + t.Fatalf("MustUnmarshal: %v", err) + } + return result +} + +// CreateInvalidJSON returns a map of named byte slices that represent various +// kinds of invalid or malformed TUF metadata. Useful for error-path tests. +func CreateInvalidJSON() map[string][]byte { + return map[string][]byte{ + "empty": []byte(""), + "invalid_json": []byte("{invalid json}"), + "missing_signed": []byte(`{"signatures": []}`), + "wrong_type": []byte(`{"signed": {"_type": "wrong"}, "signatures": []}`), + "missing_version": []byte(`{"signed": {"_type": "root"}, "signatures": []}`), + "negative_version": []byte(`{"signed": {"_type": "root", "version": -1}, "signatures": []}`), + } +} + +// HexBytes is a convenience named type for testing scenarios that require +// hex-encoded byte slices without importing the full metadata package. +type HexBytes []byte + +func (h HexBytes) String() string { + return fmt.Sprintf("%x", []byte(h)) +} + +// — JSON fixture builders — +// +// These functions do not accept *testing.T so they can be called from fuzz +// seed setup (f.Add) without the &testing.T{} anti-pattern. + +// BuildRootJSON returns a minimal, valid root.json body as JSON bytes.
+func BuildRootJSON() []byte { + expiry := time.Now().UTC().Add(24 * time.Hour) + root := map[string]any{ + "signed": map[string]any{ + "_type": "root", + "spec_version": "1.0.31", + "version": 1, + "expires": expiry.Format(time.RFC3339), + "consistent_snapshot": true, + "keys": map[string]any{}, + "roles": map[string]any{ + "root": map[string]any{"keyids": []string{}, "threshold": 1}, + "targets": map[string]any{"keyids": []string{}, "threshold": 1}, + "snapshot": map[string]any{"keyids": []string{}, "threshold": 1}, + "timestamp": map[string]any{"keyids": []string{}, "threshold": 1}, + }, + }, + "signatures": []any{}, + } + data, err := json.Marshal(root) + if err != nil { + panic(fmt.Sprintf("BuildRootJSON: %v", err)) + } + return data +} + +// BuildTargetsJSON returns a minimal, valid targets.json body as JSON bytes. +func BuildTargetsJSON() []byte { + expiry := time.Now().UTC().Add(24 * time.Hour) + targets := map[string]any{ + "signed": map[string]any{ + "_type": "targets", + "spec_version": "1.0.31", + "version": 1, + "expires": expiry.Format(time.RFC3339), + "targets": map[string]any{}, + }, + "signatures": []any{}, + } + data, err := json.Marshal(targets) + if err != nil { + panic(fmt.Sprintf("BuildTargetsJSON: %v", err)) + } + return data +} + +// BuildSnapshotJSON returns a minimal, valid snapshot.json body as JSON bytes. +func BuildSnapshotJSON() []byte { + expiry := time.Now().UTC().Add(24 * time.Hour) + snapshot := map[string]any{ + "signed": map[string]any{ + "_type": "snapshot", + "spec_version": "1.0.31", + "version": 1, + "expires": expiry.Format(time.RFC3339), + "meta": map[string]any{ + "targets.json": map[string]any{"version": 1}, + }, + }, + "signatures": []any{}, + } + data, err := json.Marshal(snapshot) + if err != nil { + panic(fmt.Sprintf("BuildSnapshotJSON: %v", err)) + } + return data +} + +// BuildTimestampJSON returns a minimal, valid timestamp.json body as JSON bytes. 
+func BuildTimestampJSON() []byte { + expiry := time.Now().UTC().Add(24 * time.Hour) + timestamp := map[string]any{ + "signed": map[string]any{ + "_type": "timestamp", + "spec_version": "1.0.31", + "version": 1, + "expires": expiry.Format(time.RFC3339), + "meta": map[string]any{ + "snapshot.json": map[string]any{"version": 1}, + }, + }, + "signatures": []any{}, + } + data, err := json.Marshal(timestamp) + if err != nil { + panic(fmt.Sprintf("BuildTimestampJSON: %v", err)) + } + return data +} + +// — Test-context variants (t.Helper wrappers for backwards compatibility) — + +// CreateTestRootJSON returns BuildRootJSON() and registers a test helper. +// Prefer calling BuildRootJSON() directly when not inside a test function. +func CreateTestRootJSON(t *testing.T) []byte { + t.Helper() + return BuildRootJSON() +} + +// CreateTestTargetsJSON returns BuildTargetsJSON() and registers a test helper. +func CreateTestTargetsJSON(t *testing.T) []byte { + t.Helper() + return BuildTargetsJSON() +} + +// CreateTestSnapshotJSON returns BuildSnapshotJSON() and registers a test helper. +func CreateTestSnapshotJSON(t *testing.T) []byte { + t.Helper() + return BuildSnapshotJSON() +} + +// CreateTestTimestampJSON returns BuildTimestampJSON() and registers a test helper. +func CreateTestTimestampJSON(t *testing.T) []byte { + t.Helper() + return BuildTimestampJSON() +} diff --git a/internal/testutils/helpers/updater.go b/internal/testutils/helpers/updater.go new file mode 100644 index 00000000..87e4134f --- /dev/null +++ b/internal/testutils/helpers/updater.go @@ -0,0 +1,173 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//

package helpers

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"
)

// UpdaterTestCase defines a single table-driven updater test scenario.
// It is intentionally decoupled from the updater package to avoid import cycles.
// Error expectations (WantErr/WantErrType/WantErrMsg) are evaluated by
// CheckError below; RefTime and UseUnsafeMode are consumed by the caller's
// setup code, not by CheckError.
type UpdaterTestCase struct {
	// Name is the subtest name passed to t.Run.
	Name string

	// Desc is an optional human-readable description logged with t.Logf.
	Desc string

	// WantErr indicates that the action under test must return a non-nil error.
	WantErr bool

	// WantErrType is an error value compared with errors.Is. Only checked when
	// WantErr is true.
	WantErrType error

	// WantErrMsg is a substring that must appear in the error message. Only
	// checked when WantErr is true.
	WantErrMsg string

	// RefTime is a reference time injected into updater tests (e.g. for
	// expiry testing).
	RefTime time.Time

	// UseUnsafeMode enables UnsafeLocalMode for this test case.
	UseUnsafeMode bool
}

// CheckError validates error expectations for a single UpdaterTestCase.
// Call it immediately after the operation under test.
+func CheckError(t *testing.T, tc UpdaterTestCase, err error) { + t.Helper() + + if tc.WantErr { + if err == nil { + t.Errorf("CheckError(%q): expected error, got nil", tc.Name) + return + } + if tc.WantErrType != nil && !errors.Is(err, tc.WantErrType) { + t.Errorf("CheckError(%q): expected error type %T, got %T: %v", + tc.Name, tc.WantErrType, err, err) + } + if tc.WantErrMsg != "" && !strings.Contains(err.Error(), tc.WantErrMsg) { + t.Errorf("CheckError(%q): expected error message containing %q, got %q", + tc.Name, tc.WantErrMsg, err.Error()) + } + } else if err != nil { + t.Errorf("CheckError(%q): unexpected error: %v", tc.Name, err) + } +} + +// AssertFilesExist asserts that each role in roles has a corresponding +// ".json" file in metadataDir. +func AssertFilesExist(t *testing.T, metadataDir string, roles []string) { + t.Helper() + for _, role := range roles { + path := filepath.Join(metadataDir, fmt.Sprintf("%s.json", role)) + if _, err := os.Stat(path); os.IsNotExist(err) { + t.Errorf("AssertFilesExist: expected %s.json not found in %s", role, metadataDir) + } + } +} + +// AssertFilesExact asserts that metadataDir contains exactly the files named +// ".json" for each role in roles — no more, no fewer. 
+func AssertFilesExact(t *testing.T, metadataDir string, roles []string) { + t.Helper() + + expected := make(map[string]bool, len(roles)) + for _, role := range roles { + expected[fmt.Sprintf("%s.json", role)] = true + } + + entries, err := os.ReadDir(metadataDir) + if err != nil { + t.Fatalf("AssertFilesExact: ReadDir(%q): %v", metadataDir, err) + } + + actual := make(map[string]bool, len(entries)) + for _, e := range entries { + actual[e.Name()] = true + } + + for name := range expected { + if !actual[name] { + t.Errorf("AssertFilesExact: expected file %q not found in %s", name, metadataDir) + } + } + for name := range actual { + if !expected[name] { + t.Errorf("AssertFilesExact: unexpected file %q found in %s", name, metadataDir) + } + } +} + +// TrustedMetadataTestCase defines a single table-driven test for TrustedMetadata +// operations. The Setup function returns the raw bytes to operate on; Action +// receives those bytes and returns an error (or nil on success). +type TrustedMetadataTestCase struct { + Name string + Desc string + Setup func(t *testing.T) []byte + Action func(t *testing.T, data []byte) error + WantErr bool + WantErrType error + WantErrMsg string +} + +// RunTrustedMetadataTests executes a slice of TrustedMetadataTestCase entries +// as subtests. 
+func RunTrustedMetadataTests(t *testing.T, tests []TrustedMetadataTestCase) { + t.Helper() + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + if tc.Desc != "" { + t.Logf("desc: %s", tc.Desc) + } + + var data []byte + if tc.Setup != nil { + data = tc.Setup(t) + } + + err := tc.Action(t, data) + + if tc.WantErr { + if err == nil { + t.Errorf("expected error, got nil") + return + } + if tc.WantErrType != nil && !errors.Is(err, tc.WantErrType) { + t.Errorf("expected error type %T, got %T: %v", + tc.WantErrType, err, err) + } + if tc.WantErrMsg != "" && !strings.Contains(err.Error(), tc.WantErrMsg) { + t.Errorf("expected error containing %q, got %q", + tc.WantErrMsg, err.Error()) + } + } else if err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} diff --git a/internal/testutils/simulator/builder.go b/internal/testutils/simulator/builder.go new file mode 100644 index 00000000..616d69e5 --- /dev/null +++ b/internal/testutils/simulator/builder.go @@ -0,0 +1,263 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//

package simulator

import (
	"testing"
	"time"

	"github.com/theupdateframework/go-tuf/v2/metadata"
)

// DelegationSpec defines a delegation to be created in the repository
type DelegationSpec struct {
	// DelegatorName names the delegating role.
	DelegatorName string
	// Role describes the delegated role.
	Role metadata.DelegatedRole
	// Targets is the initial targets metadata for the delegated role.
	Targets metadata.TargetsType
}

// SimulatorBuilder creates configured RepositorySimulator instances using a fluent API.
// Configuration is recorded by the With* methods and applied in one pass by Build.
type SimulatorBuilder struct {
	expiry                 time.Duration
	consistentSnapshot     bool
	computeHashesAndLength bool
	prefixTargetsWithHash  bool
	targets                map[string][]byte
	delegations            []DelegationSpec
	expiredRoles           map[string]bool
	roleVersions           map[string]int64
	unsignedRoles          map[string]bool
	succinctRoles          map[string]succinctRoleSpec
	rootRotations          int
	timestampVersionBump   int64
	snapshotVersionBump    int64
	targetsVersionBump     int64
}

// succinctRoleSpec captures the parameters for a succinct-roles delegation.
type succinctRoleSpec struct {
	bitLength  int
	namePrefix string
}

// NewSimulator creates a new SimulatorBuilder with sensible defaults:
// 30-day expiry, consistent snapshots on, hash-prefixed target names on,
// and no hash/length computation for meta files.
func NewSimulator() *SimulatorBuilder {
	return &SimulatorBuilder{
		expiry:                 30 * 24 * time.Hour,
		consistentSnapshot:     true,
		computeHashesAndLength: false,
		prefixTargetsWithHash:  true,
		targets:                make(map[string][]byte),
		delegations:            []DelegationSpec{},
		expiredRoles:           make(map[string]bool),
		roleVersions:           make(map[string]int64),
		unsignedRoles:          make(map[string]bool),
		succinctRoles:          make(map[string]succinctRoleSpec),
		rootRotations:          0,
	}
}

// WithExpiry sets the default metadata expiration duration
func (b *SimulatorBuilder) WithExpiry(d time.Duration) *SimulatorBuilder {
	b.expiry = d
	return b
}

// WithConsistentSnapshot enables or disables consistent snapshots
func (b *SimulatorBuilder) WithConsistentSnapshot(enabled bool) *SimulatorBuilder {
	b.consistentSnapshot = enabled
	return b
}

//
// WithComputeHashesAndLength enables or disables hash/length computation for meta files
func (b *SimulatorBuilder) WithComputeHashesAndLength(enabled bool) *SimulatorBuilder {
	b.computeHashesAndLength = enabled
	return b
}

// WithPrefixTargetsWithHash enables or disables hash-prefixed target file names
func (b *SimulatorBuilder) WithPrefixTargetsWithHash(enabled bool) *SimulatorBuilder {
	b.prefixTargetsWithHash = enabled
	return b
}

// WithTarget adds a target file to the repository
func (b *SimulatorBuilder) WithTarget(path string, content []byte) *SimulatorBuilder {
	b.targets[path] = content
	return b
}

// WithTargets adds multiple target files to the repository
func (b *SimulatorBuilder) WithTargets(targets map[string][]byte) *SimulatorBuilder {
	for path, content := range targets {
		b.targets[path] = content
	}
	return b
}

// WithExpiredRole marks a role's metadata as expired
func (b *SimulatorBuilder) WithExpiredRole(role string) *SimulatorBuilder {
	b.expiredRoles[role] = true
	return b
}

// WithVersion sets the version for a specific role
func (b *SimulatorBuilder) WithVersion(role string, version int64) *SimulatorBuilder {
	b.roleVersions[role] = version
	return b
}

// WithoutSigners removes signers for a role (for testing unsigned metadata)
func (b *SimulatorBuilder) WithoutSigners(role string) *SimulatorBuilder {
	b.unsignedRoles[role] = true
	return b
}

// WithDelegation adds a delegated targets role
func (b *SimulatorBuilder) WithDelegation(delegatorName string, role metadata.DelegatedRole, targets metadata.TargetsType) *SimulatorBuilder {
	b.delegations = append(b.delegations, DelegationSpec{
		DelegatorName: delegatorName,
		Role:          role,
		Targets:       targets,
	})
	return b
}

// WithSuccinctRoles adds succinct roles to a delegator. A second call for the
// same delegator overwrites the earlier spec (map assignment).
func (b *SimulatorBuilder) WithSuccinctRoles(delegatorName string, bitLength int, namePrefix string) *SimulatorBuilder {
	b.succinctRoles[delegatorName] = succinctRoleSpec{
		bitLength:  bitLength,
		namePrefix: namePrefix,
	}
	return b
}

// WithRootRotations performs the specified number of root rotations
func (b *SimulatorBuilder) WithRootRotations(count int) *SimulatorBuilder {
	b.rootRotations = count
	return b
}

// WithTimestampVersionBump bumps the timestamp version by the specified amount.
// Bumps of zero or less are ignored by Build (it only applies bumps > 0).
func (b *SimulatorBuilder) WithTimestampVersionBump(bump int64) *SimulatorBuilder {
	b.timestampVersionBump = bump
	return b
}

// WithSnapshotVersionBump bumps the snapshot version by the specified amount.
// Bumps of zero or less are ignored by Build (it only applies bumps > 0).
func (b *SimulatorBuilder) WithSnapshotVersionBump(bump int64) *SimulatorBuilder {
	b.snapshotVersionBump = bump
	return b
}

// WithTargetsVersionBump bumps the targets version by the specified amount.
// Bumps of zero or less are ignored by Build (it only applies bumps > 0).
func (b *SimulatorBuilder) WithTargetsVersionBump(bump int64) *SimulatorBuilder {
	b.targetsVersionBump = bump
	return b
}

// Build creates the configured RepositorySimulator. Settings are applied in a
// fixed order: flags, expiry, expired roles, explicit versions, version bumps,
// targets, delegations, succinct roles, root rotations, signer removal, and a
// final UpdateSnapshot.
func (b *SimulatorBuilder) Build(t *testing.T) *RepositorySimulator {
	t.Helper()

	sim := NewRepository()

	// Configure consistent snapshot
	sim.MDRoot.Signed.ConsistentSnapshot = b.consistentSnapshot
	sim.PrefixTargetsWithHash = b.prefixTargetsWithHash
	sim.ComputeMetafileHashesAndLength = b.computeHashesAndLength

	// Set expiry time (truncated to whole seconds so serialized timestamps
	// round-trip exactly)
	now := time.Now().UTC()
	safeExpiry := now.Truncate(time.Second).Add(b.expiry)
	sim.SafeExpiry = safeExpiry
	sim.MDRoot.Signed.Expires = safeExpiry
	sim.MDTargets.Signed.Expires = safeExpiry
	sim.MDSnapshot.Signed.Expires = safeExpiry
	sim.MDTimestamp.Signed.Expires = safeExpiry

	// Apply expired roles (use a past time)
	pastTime := now.Add(-5 * 24 * time.Hour)
	for role := range b.expiredRoles {
		switch role {
		case metadata.ROOT:
			sim.MDRoot.Signed.Expires = pastTime
		case metadata.TARGETS:
			sim.MDTargets.Signed.Expires = pastTime
		case metadata.SNAPSHOT:
			sim.MDSnapshot.Signed.Expires = pastTime
		case metadata.TIMESTAMP:
			sim.MDTimestamp.Signed.Expires = pastTime
		}
	}

	// Apply version settings. Unknown role names are silently ignored by the
	// switch, as are unknown names in expiredRoles above.
	for role, version := range b.roleVersions {
		switch role {
		case metadata.ROOT:
			sim.MDRoot.Signed.Version = version
		case metadata.TARGETS:
			sim.MDTargets.Signed.Version = version
		case metadata.SNAPSHOT:
			sim.MDSnapshot.Signed.Version = version
		case metadata.TIMESTAMP:
			sim.MDTimestamp.Signed.Version = version
		}
	}

	// Apply version bumps (only positive bumps take effect)
	if b.timestampVersionBump > 0 {
		sim.MDTimestamp.Signed.Version += b.timestampVersionBump
	}
	if b.snapshotVersionBump > 0 {
		sim.MDSnapshot.Signed.Version += b.snapshotVersionBump
	}
	if b.targetsVersionBump > 0 {
		sim.MDTargets.Signed.Version += b.targetsVersionBump
	}

	// Add targets
	for path, content := range b.targets {
		sim.AddTarget(metadata.TARGETS, content, path)
	}

	// Add delegations
	for _, delegation := range b.delegations {
		sim.AddDelegation(delegation.DelegatorName, delegation.Role, delegation.Targets)
	}

	// Add succinct roles
	for delegatorName, spec := range b.succinctRoles {
		sim.AddSuccinctRoles(delegatorName, spec.bitLength, spec.namePrefix)
	}

	// Perform root rotations
	for i := 0; i < b.rootRotations; i++ {
		sim.MDRoot.Signed.Version++
		sim.PublishRoot()
	}

	// Remove signers for unsigned roles (do this after other setup)
	for role := range b.unsignedRoles {
		delete(sim.Signers, role)
	}

	// Update snapshot and timestamp to reflect changes
	// NOTE(review): if UpdateSnapshot bumps the snapshot/timestamp versions,
	// it may override values set via WithVersion for those roles — confirm
	// against RepositorySimulator.UpdateSnapshot.
	sim.UpdateSnapshot()

	return sim
}

// Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//

package simulator

import "github.com/theupdateframework/go-tuf/v2/metadata"

// SimulatorOption configures a RepositorySimulator at construction time.
type SimulatorOption interface {
	// apply mutates the simulator in place.
	apply(*RepositorySimulator)
}

// DelegatesOption replaces the simulator's delegated-targets metadata map.
type DelegatesOption map[string]metadata.Metadata[metadata.TargetsType]

// apply installs the delegates map on the simulator (assignment: it
// overwrites any existing map rather than merging into it).
func (o DelegatesOption) apply(s *RepositorySimulator) {
	s.MDDelegates = o
}

// WithDelegates returns a SimulatorOption that sets the simulator's
// delegated-targets metadata.
func WithDelegates(delegates map[string]metadata.Metadata[metadata.TargetsType]) SimulatorOption {
	return DelegatesOption(delegates)
}
"err", err) - } - - signer, err := signature.LoadSigner(private, crypto.Hash(0)) - if err != nil { - slog.Error("failed to load signer", "err", err) - } - - return &public, &private, &signer -} - func (rs *RepositorySimulator) AddSigner(role string, keyID string, signer signature.Signer) { if _, ok := rs.Signers[role]; !ok { rs.Signers[role] = make(map[string]*signature.Signer) @@ -246,7 +229,7 @@ func (rs *RepositorySimulator) RotateKeys(role string) { } for i := 0; i < rs.MDRoot.Signed.Roles[role].Threshold; i++ { - publicKey, _, signer := CreateKey() + publicKey, _, signer := createKey() mtdkey, err := metadata.KeyFromPublicKey(*publicKey) if err != nil { slog.Error("Repository simulator: key conversion failed while rotating keys", "err", err) @@ -326,34 +309,6 @@ func (rs *RepositorySimulator) DownloadFile(urlPath string, maxLength int64, _ t return data, err } -func IsWindowsPath(path string) bool { - match, _ := regexp.MatchString(`^[a-zA-Z]:\\`, path) - return match -} - -func trimPrefix(path string, prefix string) (string, error) { - var toTrim string - if IsWindowsPath(path) { - toTrim = path - } else { - parsedURL, e := url.Parse(path) - if e != nil { - return "", e - } - toTrim = parsedURL.Path - } - - return strings.TrimPrefix(toTrim, prefix), nil -} - -func hasPrefix(path, prefix string) bool { - return strings.HasPrefix(filepath.ToSlash(path), prefix) -} - -func hasSuffix(path, prefix string) bool { - return strings.HasSuffix(filepath.ToSlash(path), prefix) -} - func (rs *RepositorySimulator) fetch(urlPath string) ([]byte, error) { path, err := trimPrefix(urlPath, rs.LocalDir) if err != nil { @@ -562,7 +517,7 @@ func (rs *RepositorySimulator) AddDelegation(delegatorName string, role metadata delegator.Delegations.Roles = append(delegator.Delegations.Roles, role) // By default add one new key for the role - publicKey, _, signer := CreateKey() + publicKey, _, signer := createKey() mdkey, err := metadata.KeyFromPublicKey(*publicKey) if err != nil { 
slog.Error("Repository simulator: key conversion failed while adding delegation", "err", err) @@ -595,7 +550,7 @@ func (rs *RepositorySimulator) AddSuccinctRoles(delegatorName string, bitLength slog.Error("Can't add a SuccinctRoles when delegated roles are used") os.Exit(1) } - publicKey, _, signer := CreateKey() + publicKey, _, signer := createKey() mdkey, err := metadata.KeyFromPublicKey(*publicKey) if err != nil { slog.Error("Repository simulator: key conversion failed while adding succinct roles", "err", err) diff --git a/internal/testutils/simulator/repository_simulator_setup.go b/internal/testutils/simulator/repository_simulator_setup.go index 18b75dfb..8ed7f565 100644 --- a/internal/testutils/simulator/repository_simulator_setup.go +++ b/internal/testutils/simulator/repository_simulator_setup.go @@ -64,7 +64,7 @@ func InitLocalEnv() error { func InitMetadataDir() (*RepositorySimulator, string, string, error) { if err := InitLocalEnv(); err != nil { slog.Error("Failed to initialize environment", "err", err) - os.Exit(1) + return nil, "", "", err } metadataDir := filepath.Join(LocalDir, metadataPath) diff --git a/internal/testutils/simulator/test_repository.go b/internal/testutils/simulator/test_repository.go new file mode 100644 index 00000000..c2e77441 --- /dev/null +++ b/internal/testutils/simulator/test_repository.go @@ -0,0 +1,429 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License
//
// SPDX-License-Identifier: Apache-2.0
//

package simulator

import (
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/theupdateframework/go-tuf/v2/metadata"
	"github.com/theupdateframework/go-tuf/v2/metadata/config"
)

// TestRepository encapsulates all test state for an isolated repository (no globals)
type TestRepository struct {
	// Simulator is the in-memory TUF repository under test.
	Simulator *RepositorySimulator
	// MetadataDir is the local metadata cache directory for the client.
	MetadataDir string
	// TargetsDir is the local download directory for target files.
	TargetsDir string
	// RootBytes holds the trusted root.json used to bootstrap a client.
	RootBytes []byte
	// LocalDir is the per-test temporary directory containing the above.
	LocalDir string
	t        *testing.T
	// pastTime is a fixed time five days in the past, used for expiry tests.
	pastTime time.Time
}

// NewTestRepository creates an isolated test repository with a fresh simulator
func NewTestRepository(t *testing.T) *TestRepository {
	t.Helper()

	repo := &TestRepository{
		t:        t,
		pastTime: time.Now().UTC().Truncate(24 * time.Hour).Add(-5 * 24 * time.Hour),
	}

	repo.initializeRepository()
	return repo
}

// NewTestRepositoryWithBuilder creates an isolated test repository using a SimulatorBuilder
func NewTestRepositoryWithBuilder(t *testing.T, builder *SimulatorBuilder) *TestRepository {
	t.Helper()

	repo := &TestRepository{
		t:        t,
		pastTime: time.Now().UTC().Truncate(24 * time.Hour).Add(-5 * 24 * time.Hour),
	}

	repo.initializeRepositoryWithBuilder(builder)
	return repo
}

// initializeRepository creates the on-disk layout (metadata/ and targets/
// under a per-test temp dir), builds a default simulator, and seeds the
// trusted root from the simulator's first signed root.
func (r *TestRepository) initializeRepository() {
	r.t.Helper()

	// t.TempDir() creates a temporary directory that is automatically removed
	// when the test and all its subtests complete.
	tmpDir := r.t.TempDir()
	r.LocalDir = tmpDir

	metadataDir := filepath.Join(tmpDir, "metadata")
	if err := os.MkdirAll(metadataDir, 0750); err != nil {
		r.t.Fatalf("failed to create metadata dir: %v", err)
	}
	r.MetadataDir = metadataDir

	targetsDir := filepath.Join(tmpDir, "targets")
	if err := os.MkdirAll(targetsDir, 0750); err != nil {
		r.t.Fatalf("failed to create targets dir: %v", err)
	}
	r.TargetsDir = targetsDir

	// Create and configure the simulator
	r.Simulator = NewRepository()
	r.Simulator.LocalDir = tmpDir

	// Write initial root metadata (SignedRoots[0] is the version-1 root)
	rootPath := filepath.Join(metadataDir, "root.json")
	if err := os.WriteFile(rootPath, r.Simulator.SignedRoots[0], 0644); err != nil {
		r.t.Fatalf("failed to write root.json: %v", err)
	}

	// Read root bytes for config
	var err error
	r.RootBytes, err = os.ReadFile(rootPath)
	if err != nil {
		r.t.Fatalf("failed to read root bytes: %v", err)
	}
}

// initializeRepositoryWithBuilder is the builder-driven variant of
// initializeRepository. Unlike the default path it tolerates an empty
// SignedRoots slice at write time (the subsequent ReadFile then fails the
// test with a missing-file error).
func (r *TestRepository) initializeRepositoryWithBuilder(builder *SimulatorBuilder) {
	r.t.Helper()

	tmpDir := r.t.TempDir()
	r.LocalDir = tmpDir

	metadataDir := filepath.Join(tmpDir, "metadata")
	if err := os.MkdirAll(metadataDir, 0750); err != nil {
		r.t.Fatalf("failed to create metadata dir: %v", err)
	}
	r.MetadataDir = metadataDir

	targetsDir := filepath.Join(tmpDir, "targets")
	if err := os.MkdirAll(targetsDir, 0750); err != nil {
		r.t.Fatalf("failed to create targets dir: %v", err)
	}
	r.TargetsDir = targetsDir

	// Build the simulator with the provided configuration
	r.Simulator = builder.Build(r.t)
	r.Simulator.LocalDir = tmpDir

	// Write initial root metadata
	rootPath := filepath.Join(metadataDir, "root.json")
	if len(r.Simulator.SignedRoots) > 0 {
		if err := os.WriteFile(rootPath, r.Simulator.SignedRoots[0], 0644); err != nil {
			r.t.Fatalf("failed to write root.json: %v", err)
		}
	}

	var err error
	r.RootBytes, err = os.ReadFile(rootPath)
	if err != nil {
		r.t.Fatalf("failed to read root bytes: %v", err)
	}
}

// Cleanup is a no-op: the temporary directory is removed automatically when
// the test completes because it was created with t.TempDir(). It is kept for
// call-site compatibility.
func (r *TestRepository) Cleanup() {}

// GetUpdaterConfig returns an UpdaterConfig configured to use this test repository.
// NOTE(review): config.New's first argument is normally the remote metadata
// URL; MetadataDir is passed here and the simulator is installed as the
// Fetcher — confirm the simulator resolves paths relative to it.
func (r *TestRepository) GetUpdaterConfig() (*config.UpdaterConfig, error) {
	cfg, err := config.New(r.MetadataDir, r.RootBytes)
	if err != nil {
		return nil, err
	}
	cfg.Fetcher = r.Simulator
	cfg.LocalMetadataDir = r.MetadataDir
	cfg.LocalTargetsDir = r.TargetsDir
	return cfg, nil
}

// GetUnsafeUpdaterConfig returns an UpdaterConfig with UnsafeLocalMode enabled
func (r *TestRepository) GetUnsafeUpdaterConfig() (*config.UpdaterConfig, error) {
	cfg, err := r.GetUpdaterConfig()
	if err != nil {
		return nil, err
	}
	cfg.UnsafeLocalMode = true
	return cfg, nil
}

// PublishRoot signs and publishes a new root
func (r *TestRepository) PublishRoot() {
	r.Simulator.PublishRoot()
}

// SetExpired marks a role's metadata as expired (expiry set five days back)
func (r *TestRepository) SetExpired(role string) {
	r.SetExpiresAt(role, r.pastTime)
}

// SetExpiresAt sets the expiration time for a role's metadata.
// Unknown role names are silently ignored.
func (r *TestRepository) SetExpiresAt(role string, expires time.Time) {
	switch role {
	case metadata.ROOT:
		r.Simulator.MDRoot.Signed.Expires = expires
	case metadata.TARGETS:
		r.Simulator.MDTargets.Signed.Expires = expires
	case metadata.SNAPSHOT:
		r.Simulator.MDSnapshot.Signed.Expires = expires
	case metadata.TIMESTAMP:
		r.Simulator.MDTimestamp.Signed.Expires = expires
	}
}

// SetVersion changes version for a role.
// Unknown role names are silently ignored.
func (r *TestRepository) SetVersion(role string, version int64) {
	switch role {
	case metadata.ROOT:
		r.Simulator.MDRoot.Signed.Version = version
	case metadata.TARGETS:
		r.Simulator.MDTargets.Signed.Version = version
	case metadata.SNAPSHOT:
		r.Simulator.MDSnapshot.Signed.Version = version
	case metadata.TIMESTAMP:
		r.Simulator.MDTimestamp.Signed.Version = version
	}
}

// GetVersion returns the current version for a role, or 0 for unknown roles
func (r *TestRepository) GetVersion(role string) int64 {
	switch role {
	case metadata.ROOT:
		return r.Simulator.MDRoot.Signed.Version
	case metadata.TARGETS:
		return r.Simulator.MDTargets.Signed.Version
	case metadata.SNAPSHOT:
		return r.Simulator.MDSnapshot.Signed.Version
	case metadata.TIMESTAMP:
		return r.Simulator.MDTimestamp.Signed.Version
	}
	return 0
}

// BumpVersion increments version for a role. Bumping root also publishes the
// new root so clients can fetch it.
func (r *TestRepository) BumpVersion(role string) {
	switch role {
	case metadata.ROOT:
		r.Simulator.MDRoot.Signed.Version++
		r.Simulator.PublishRoot()
	case metadata.TARGETS:
		r.Simulator.MDTargets.Signed.Version++
	case metadata.SNAPSHOT:
		r.Simulator.MDSnapshot.Signed.Version++
	case metadata.TIMESTAMP:
		r.Simulator.MDTimestamp.Signed.Version++
	}
}

// RemoveSigners removes signers for a role (for testing unsigned metadata)
func (r *TestRepository) RemoveSigners(role string) {
	delete(r.Simulator.Signers, role)
}

// RotateKeys rotates keys for a role
func (r *TestRepository) RotateKeys(role string) {
	r.Simulator.RotateKeys(role)
}

// AddTarget adds a target file to the repository
func (r *TestRepository) AddTarget(role string, content []byte, path string) {
	r.Simulator.AddTarget(role, content, path)
}

// UpdateSnapshot updates snapshot metadata and timestamp
func (r *TestRepository) UpdateSnapshot() {
	r.Simulator.UpdateSnapshot()
}

// UpdateTimestamp updates only timestamp metadata
func (r *TestRepository) UpdateTimestamp() {
	r.Simulator.UpdateTimestamp()
}

// EnableComputeHashesAndLength enables hash/length computation for meta files
func (r *TestRepository) EnableComputeHashesAndLength() {
	r.Simulator.ComputeMetafileHashesAndLength = true
}

// DisableComputeHashesAndLength disables hash/length computation for meta files
func (r *TestRepository) DisableComputeHashesAndLength() {
	r.Simulator.ComputeMetafileHashesAndLength = false
}

// PastTime returns a time in the past for expiration testing
func (r *TestRepository) PastTime() time.Time {
	return r.pastTime
}

// SetSnapshotMeta sets the meta information for a role in snapshot.
// NOTE(review): indexing-then-assigning requires the Meta map to hold
// pointers, and panics if "<role>.json" is not already present — confirm.
func (r *TestRepository) SetSnapshotMeta(role string, version int64) {
	r.Simulator.MDSnapshot.Signed.Meta[fmt.Sprintf("%s.json", role)].Version = version
}

// SetTimestampSnapshotMeta sets the snapshot meta information in timestamp
func (r *TestRepository) SetTimestampSnapshotMeta(version int64) {
	r.Simulator.MDTimestamp.Signed.Meta["snapshot.json"].Version = version
}

// AssertFilesExist asserts that local metadata files ("<role>.json") exist
// for the given roles
func (r *TestRepository) AssertFilesExist(roles []string) {
	r.t.Helper()

	expectedFiles := make(map[string]bool)
	for _, role := range roles {
		expectedFiles[fmt.Sprintf("%s.json", role)] = true
	}

	files, err := os.ReadDir(r.MetadataDir)
	if err != nil {
		r.t.Fatalf("failed to read metadata dir: %v", err)
	}

	actualFiles := make(map[string]bool)
	for _, f := range files {
		actualFiles[f.Name()] = true
	}

	for expected := range expectedFiles {
		if !actualFiles[expected] {
			r.t.Errorf("expected file %s not found in metadata dir", expected)
		}
	}
}

// AssertFilesExact asserts that exactly these files exist in the metadata dir
func (r *TestRepository) AssertFilesExact(roles []string) {
	r.t.Helper()

	expectedFiles := make(map[string]bool)
	for _, role := range roles {
		expectedFiles[fmt.Sprintf("%s.json", role)] = true
	}

	files, err := os.ReadDir(r.MetadataDir)
	if err != nil {
		r.t.Fatalf("failed to read metadata dir: %v", err)
	}

	actualFiles := make(map[string]bool)
	for _, f := range files {
		actualFiles[f.Name()] = true
	}

	if len(expectedFiles) != len(actualFiles) {
		r.t.Errorf("expected %d files, got %d", len(expectedFiles), len(actualFiles))
	}

	for expected := range expectedFiles {
		if !actualFiles[expected] {
			r.t.Errorf("expected file %s not found", expected)
		}
	}

	for actual := range actualFiles {
		if !expectedFiles[actual] {
			r.t.Errorf("unexpected file %s found", actual)
		}
	}
}

// AssertVersionEquals asserts that a local metadata file has the expected
// version. Each case reloads the role's metadata from disk via FromFile;
// unknown role names are silently ignored.
func (r *TestRepository) AssertVersionEquals(role string, expectedVersion int64) {
	r.t.Helper()

	path := filepath.Join(r.MetadataDir, fmt.Sprintf("%s.json", role))

	switch role {
	case metadata.ROOT:
		md, err := r.Simulator.MDRoot.FromFile(path)
		if err != nil {
			r.t.Fatalf("failed to load %s: %v", role, err)
		}
		if md.Signed.Version != expectedVersion {
			r.t.Errorf("expected %s version %d, got %d", role, expectedVersion, md.Signed.Version)
		}
	case metadata.TARGETS:
		md, err := r.Simulator.MDTargets.FromFile(path)
		if err != nil {
			r.t.Fatalf("failed to load %s: %v", role, err)
		}
		if md.Signed.Version != expectedVersion {
			r.t.Errorf("expected %s version %d, got %d", role, expectedVersion, md.Signed.Version)
		}
	case metadata.TIMESTAMP:
		md, err := r.Simulator.MDTimestamp.FromFile(path)
		if err != nil {
			r.t.Fatalf("failed to load %s: %v", role, err)
		}
		if md.Signed.Version != expectedVersion {
			r.t.Errorf("expected %s version %d, got %d", role, expectedVersion, md.Signed.Version)
		}
	case metadata.SNAPSHOT:
		md, err := r.Simulator.MDSnapshot.FromFile(path)
		if err != nil {
			r.t.Fatalf("failed to load %s: %v", role, err)
		}
		if md.Signed.Version != expectedVersion {
			r.t.Errorf("expected %s version %d, got %d", role, expectedVersion, md.Signed.Version)
		}
	}
}

// AssertContentEquals asserts that local file content matches the simulator's
// metadata for the role (byte-for-byte comparison of the serialized form)
func (r *TestRepository) AssertContentEquals(role string, version *int) {
	r.t.Helper()

	expectedContent, err := r.Simulator.FetchMetadata(role, version)
	if err != nil {
		r.t.Fatalf("failed to fetch expected metadata: %v", err)
	}

	actualContent, err := os.ReadFile(filepath.Join(r.MetadataDir, fmt.Sprintf("%s.json", role)))
	if err != nil {
		r.t.Fatalf("failed to read actual metadata: %v", err)
	}

	if string(expectedContent) != string(actualContent) {
		r.t.Errorf("content mismatch for %s", role)
	}
}

// ReloadRootBytes reloads the root bytes from the metadata directory
func (r *TestRepository) ReloadRootBytes() error {
	rootPath := filepath.Join(r.MetadataDir, "root.json")
	bytes, err := os.ReadFile(rootPath)
	if err != nil {
		return err
	}
	r.RootBytes = bytes
	return nil
}

// WriteRoot writes root metadata to the metadata directory.
// version is 1-based: WriteRoot(1) writes SignedRoots[0].
func (r *TestRepository) WriteRoot(version int) error {
	if version < 1 || version > len(r.Simulator.SignedRoots) {
		return fmt.Errorf("invalid root version: %d", version)
	}
	rootPath := filepath.Join(r.MetadataDir, "root.json")
	return os.WriteFile(rootPath, r.Simulator.SignedRoots[version-1], 0644)
}

// Copyright 2024 The Update Framework Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package simulator + +import ( + "crypto" + "crypto/ed25519" + "log/slog" + "net/url" + "path/filepath" + "regexp" + "strings" + + "github.com/sigstore/sigstore/pkg/signature" +) + +// createKey generates a new ed25519 public-private key pair and a signer using the private key. +// It returns pointers to the public key, private key, and the signer. +// If there is an error during the key generation or signer loading, it logs the error and continues. +func createKey() (*ed25519.PublicKey, *ed25519.PrivateKey, *signature.Signer) { + public, private, err := ed25519.GenerateKey(nil) + if err != nil { + slog.Error("Failed to generate key", "err", err) + } + + signer, err := signature.LoadSigner(private, crypto.Hash(0)) + if err != nil { + slog.Error("Failed to load signer", "err", err) + } + + return &public, &private, &signer +} + +// trimPrefix is a function that takes a path and a prefix as input. +// It checks if the path starts with a drive letter (e.g., "C:\"). +// If it does, it trims the prefix from the path. +// If it doesn't, it parses the path as a URL and trims the prefix from the URL's path. +// The function returns the trimmed path or an error if the path cannot be parsed as a URL. 
+func trimPrefix(path string, prefix string) (string, error) {
+	var toTrim string
+	if match, _ := regexp.MatchString(`^[a-zA-Z]:\\`, path); match {
+		toTrim = path
+	} else {
+		parsedURL, err := url.Parse(path)
+		if err != nil {
+			return "", err
+		}
+
+		toTrim = parsedURL.Path
+	}
+
+	return strings.TrimPrefix(toTrim, prefix), nil
+}
+
+// hasPrefix reports whether the slash-normalized path starts with prefix.
+func hasPrefix(path, prefix string) bool {
+	return strings.HasPrefix(filepath.ToSlash(path), prefix)
+}
+
+// hasSuffix reports whether the slash-normalized path ends with suffix.
+func hasSuffix(path, suffix string) bool {
+	return strings.HasSuffix(filepath.ToSlash(path), suffix)
+}
diff --git a/metadata/config/config_table_test.go b/metadata/config/config_table_test.go
new file mode 100644
index 00000000..bc6ce3e8
--- /dev/null
+++ b/metadata/config/config_table_test.go
@@ -0,0 +1,309 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package config
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/theupdateframework/go-tuf/v2/internal/testutils/helpers"
+	"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
+)
+
+// TestUpdaterConfigNew tests the New constructor using table-driven tests.
+// It covers valid and invalid remote URL inputs.
+func TestUpdaterConfigNew(t *testing.T) { + rootBytes := helpers.CreateTestRootJSON(t) + + tests := []struct { + name string + remoteURL string + expectError bool + errorMessage string + validate func(t *testing.T, cfg *UpdaterConfig) + }{ + { + name: "valid simple path", + remoteURL: "simple/path", + expectError: false, + validate: func(t *testing.T, cfg *UpdaterConfig) { + t.Helper() + assert.NotNil(t, cfg) + assert.Equal(t, "simple/path", cfg.RemoteMetadataURL) + assert.Equal(t, int64(256), cfg.MaxRootRotations) + assert.Equal(t, 32, cfg.MaxDelegations) + assert.Equal(t, int64(512000), cfg.RootMaxLength) + assert.Equal(t, int64(16384), cfg.TimestampMaxLength) + assert.Equal(t, int64(2000000), cfg.SnapshotMaxLength) + assert.Equal(t, int64(5000000), cfg.TargetsMaxLength) + assert.False(t, cfg.UnsafeLocalMode) + assert.True(t, cfg.PrefixTargetsWithHash) + assert.NotNil(t, cfg.Fetcher) + assert.IsType(t, &fetcher.DefaultFetcher{}, cfg.Fetcher) + }, + }, + { + name: "valid absolute path", + remoteURL: "/absolute/path/to/metadata", + expectError: false, + validate: func(t *testing.T, cfg *UpdaterConfig) { + t.Helper() + assert.NotNil(t, cfg) + assert.Equal(t, "/absolute/path/to/metadata", cfg.RemoteMetadataURL) + }, + }, + { + name: "valid https URL", + remoteURL: "https://example.com/metadata", + expectError: false, + validate: func(t *testing.T, cfg *UpdaterConfig) { + t.Helper() + assert.Equal(t, "https://example.com/metadata", cfg.RemoteMetadataURL) + assert.Equal(t, "https://example.com/metadata/targets", cfg.RemoteTargetsURL) + }, + }, + { + name: "valid file URL", + remoteURL: "file:///path/to/metadata", + expectError: false, + }, + { + name: "empty remote URL", + remoteURL: "", + expectError: false, + validate: func(t *testing.T, cfg *UpdaterConfig) { + t.Helper() + assert.NotNil(t, cfg) + assert.Equal(t, "", cfg.RemoteMetadataURL) + }, + }, + { + name: "invalid control character in URL", + remoteURL: string([]byte{0x7f}), + expectError: true, + 
errorMessage: "invalid control character", + }, + { + name: "path with spaces", + remoteURL: "path with spaces", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg, err := New(tt.remoteURL, rootBytes) + if tt.expectError { + assert.Error(t, err) + if tt.errorMessage != "" { + assert.Contains(t, err.Error(), tt.errorMessage) + } + assert.Nil(t, cfg) + return + } + assert.NoError(t, err) + assert.NotNil(t, cfg) + if tt.validate != nil { + tt.validate(t, cfg) + } + }) + } +} + +// TestUpdaterConfigDefaults verifies the default and custom configuration values. +func TestUpdaterConfigDefaults(t *testing.T) { + t.Run("default configuration values", func(t *testing.T) { + cfg, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + + assert.Equal(t, int64(256), cfg.MaxRootRotations) + assert.Equal(t, 32, cfg.MaxDelegations) + assert.Equal(t, int64(512000), cfg.RootMaxLength) + assert.Equal(t, int64(16384), cfg.TimestampMaxLength) + assert.Equal(t, int64(2000000), cfg.SnapshotMaxLength) + assert.Equal(t, int64(5000000), cfg.TargetsMaxLength) + assert.False(t, cfg.UnsafeLocalMode) + assert.True(t, cfg.PrefixTargetsWithHash) + assert.False(t, cfg.DisableLocalCache) + assert.NotNil(t, cfg.Fetcher) + assert.IsType(t, &fetcher.DefaultFetcher{}, cfg.Fetcher) + }) + + t.Run("custom configuration values", func(t *testing.T) { + cfg, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + + cfg.MaxRootRotations = 100 + cfg.MaxDelegations = 10 + cfg.RootMaxLength = 100000 + cfg.UnsafeLocalMode = true + cfg.PrefixTargetsWithHash = false + + assert.Equal(t, int64(100), cfg.MaxRootRotations) + assert.Equal(t, 10, cfg.MaxDelegations) + assert.Equal(t, int64(100000), cfg.RootMaxLength) + assert.True(t, cfg.UnsafeLocalMode) + assert.False(t, cfg.PrefixTargetsWithHash) + }) +} + +// TestEnsurePathsExistTable tests EnsurePathsExist via table-driven subtests. 
+// EnsurePathsExist calls os.MkdirAll to create local cache directories. +func TestEnsurePathsExistTable(t *testing.T) { + tests := []struct { + name string + buildConfig func(t *testing.T) *UpdaterConfig + expectError bool + errorMessage string + }{ + { + name: "creates metadata and targets directories", + buildConfig: func(t *testing.T) *UpdaterConfig { + t.Helper() + tmp := t.TempDir() + cfg, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + cfg.LocalMetadataDir = filepath.Join(tmp, "metadata") + cfg.LocalTargetsDir = filepath.Join(tmp, "targets") + return cfg + }, + expectError: false, + }, + { + name: "creates deeply nested directories", + buildConfig: func(t *testing.T) *UpdaterConfig { + t.Helper() + tmp := t.TempDir() + cfg, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + cfg.LocalMetadataDir = filepath.Join(tmp, "a", "b", "c", "metadata") + cfg.LocalTargetsDir = filepath.Join(tmp, "a", "b", "c", "targets") + return cfg + }, + expectError: false, + }, + { + name: "no-op when DisableLocalCache is true", + buildConfig: func(t *testing.T) *UpdaterConfig { + t.Helper() + cfg, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + cfg.DisableLocalCache = true + // Empty paths that would otherwise fail + cfg.LocalMetadataDir = "" + cfg.LocalTargetsDir = "" + return cfg + }, + expectError: false, + }, + { + name: "fails when metadata dir path is empty", + buildConfig: func(t *testing.T) *UpdaterConfig { + t.Helper() + cfg, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + cfg.LocalMetadataDir = "" + cfg.LocalTargetsDir = "" + return cfg + }, + expectError: true, + }, + { + name: "fails when metadata dir path is a file", + buildConfig: func(t *testing.T) *UpdaterConfig { + t.Helper() + tmp := t.TempDir() + // Create a file at the path where a directory is expected + metadataFile := filepath.Join(tmp, 
"metadata_file") + err := os.WriteFile(metadataFile, []byte("test"), 0600) + assert.NoError(t, err) + + cfg, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + cfg.LocalMetadataDir = metadataFile + cfg.LocalTargetsDir = filepath.Join(tmp, "targets") + return cfg + }, + expectError: true, + errorMessage: "not a directory", + }, + { + name: "already-existing directories succeed", + buildConfig: func(t *testing.T) *UpdaterConfig { + t.Helper() + tmp := t.TempDir() + metadataDir := filepath.Join(tmp, "metadata") + targetsDir := filepath.Join(tmp, "targets") + assert.NoError(t, os.MkdirAll(metadataDir, 0700)) + assert.NoError(t, os.MkdirAll(targetsDir, 0700)) + + cfg, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + cfg.LocalMetadataDir = metadataDir + cfg.LocalTargetsDir = targetsDir + return cfg + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := tt.buildConfig(t) + err := cfg.EnsurePathsExist() + + if tt.expectError { + assert.Error(t, err) + if tt.errorMessage != "" { + assert.Contains(t, err.Error(), tt.errorMessage) + } + return + } + assert.NoError(t, err) + }) + } +} + +// TestUpdaterConfigCopy verifies that config values can be copied independently. +func TestUpdaterConfigCopy(t *testing.T) { + original, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + + copied := *original + + // Mutate the original + original.MaxRootRotations = 999 + original.UnsafeLocalMode = true + + // Verify the copy is independent of the original + assert.NotEqual(t, copied.MaxRootRotations, original.MaxRootRotations) + assert.NotEqual(t, copied.UnsafeLocalMode, original.UnsafeLocalMode) +} + +// TestUpdaterConfigCustomFetcher verifies that a custom fetcher can be set. 
+func TestUpdaterConfigCustomFetcher(t *testing.T) { + cfg, err := New("https://example.com", helpers.CreateTestRootJSON(t)) + assert.NoError(t, err) + + customFetcher := &fetcher.DefaultFetcher{} + cfg.Fetcher = customFetcher + + assert.Same(t, customFetcher, cfg.Fetcher) +} diff --git a/metadata/marshal_test.go b/metadata/marshal_test.go new file mode 100644 index 00000000..59e11914 --- /dev/null +++ b/metadata/marshal_test.go @@ -0,0 +1,542 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/theupdateframework/go-tuf/v2/internal/testutils/helpers" +) + +func TestMarshalUnmarshalJSON(t *testing.T) { + fixedTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + tests := []struct { + name string + input any + wantErr bool + }{ + { + name: "RootType Marshal/Unmarshal", + input: RootType{Type: "root", SpecVersion: "1.0", ConsistentSnapshot: true, Version: 1, Expires: fixedTime}, + wantErr: false, + }, + { + name: "SnapshotType Marshal/Unmarshal", + input: SnapshotType{Type: "snapshot", SpecVersion: "1.0", Version: 1, Expires: fixedTime}, + wantErr: false, + }, + { + name: "TimestampType Marshal/Unmarshal", + input: TimestampType{Type: "timestamp", SpecVersion: "1.0", Version: 1, Expires: fixedTime}, + wantErr: false, + }, + { + name: 
"TargetsType Marshal/Unmarshal", + input: TargetsType{Type: "targets", SpecVersion: "1.0", Version: 1, Expires: fixedTime}, + wantErr: false, + }, + { + name: "MetaFiles Marshal/Unmarshal", + input: MetaFiles{Length: 123, Hashes: Hashes{"sha256": HexBytes("abc123")}, Version: 1}, + wantErr: false, + }, + { + name: "TargetFiles Marshal/Unmarshal", + input: TargetFiles{Length: 123, Hashes: Hashes{"sha256": HexBytes("abc123")}}, + wantErr: false, + }, + { + name: "Key Marshal/Unmarshal", + input: Key{Type: "ed25519", Scheme: "scheme", Value: KeyVal{PublicKey: "publicKey"}}, + wantErr: false, + }, + { + name: "Signature Marshal/Unmarshal", + input: Signature{KeyID: "keyid", Signature: HexBytes("signature")}, + wantErr: false, + }, + { + name: "Delegations Marshal/Unmarshal", + input: Delegations{Keys: map[string]*Key{"keyid": {Type: "ed25519", Scheme: "scheme", Value: KeyVal{PublicKey: "publicKey"}}}}, + wantErr: false, + }, + { + name: "DelegatedRole Marshal/Unmarshal", + input: DelegatedRole{Name: "role", KeyIDs: []string{"keyid"}, Threshold: 1, Terminating: true}, + wantErr: false, + }, + { + name: "SuccinctRoles Marshal/Unmarshal", + input: SuccinctRoles{KeyIDs: []string{"keyid"}, Threshold: 1, BitLength: 8, NamePrefix: "prefix"}, + wantErr: false, + }, + { + name: "HexBytes Marshal/Unmarshal", + input: HexBytes("abc123"), + wantErr: false, + }, + { + name: "Empty HexBytes Marshal/Unmarshal", + input: HexBytes(""), + wantErr: false, + }, + { + name: "Role Marshal/Unmarshal", + input: Role{KeyIDs: []string{"key1", "key2"}, Threshold: 2}, + wantErr: false, + }, + { + name: "KeyVal Marshal/Unmarshal", + input: KeyVal{PublicKey: "public-key-bytes"}, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Marshal the input + data, err := json.Marshal(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("Marshal() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return // Skip unmarshal test if 
marshal was expected to fail + } + + // Unmarshal back to same type + switch v := tt.input.(type) { + case RootType: + var result RootType + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Type, v.Type) + assert.Equal(t, result.Version, v.Version) + assert.Equal(t, result.SpecVersion, v.SpecVersion) + + case SnapshotType: + var result SnapshotType + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Type, v.Type) + assert.Equal(t, result.Version, v.Version) + + case TimestampType: + var result TimestampType + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Type, v.Type) + assert.Equal(t, result.Version, v.Version) + + case TargetsType: + var result TargetsType + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Type, v.Type) + assert.Equal(t, result.Version, v.Version) + + case MetaFiles: + var result MetaFiles + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Length, v.Length) + assert.Equal(t, result.Version, v.Version) + + case TargetFiles: + var result TargetFiles + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Length, v.Length) + + case Key: + var result Key + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Type, v.Type) + assert.Equal(t, result.Scheme, v.Scheme) + + case Signature: + var result Signature + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.KeyID, v.KeyID) + assert.Equal(t, 
string(result.Signature), string(v.Signature)) + + case Delegations: + var result Delegations + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, len(result.Keys), len(v.Keys)) + + case DelegatedRole: + var result DelegatedRole + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Name, v.Name) + assert.Equal(t, result.Threshold, v.Threshold) + assert.Equal(t, result.Terminating, v.Terminating) + + case SuccinctRoles: + var result SuccinctRoles + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Threshold, v.Threshold) + assert.Equal(t, result.BitLength, v.BitLength) + assert.Equal(t, result.NamePrefix, v.NamePrefix) + + case HexBytes: + var result HexBytes + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, string(result), string(v)) + + case Role: + var result Role + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, result.Threshold, v.Threshold) + assert.ElementsMatch(t, result.KeyIDs, v.KeyIDs) + + case KeyVal: + var result KeyVal + err = json.Unmarshal(data, &result) + if err != nil { + t.Errorf("Unmarshal() error = %v", err) + return + } + assert.Equal(t, string(result.PublicKey), string(v.PublicKey)) + + default: + t.Errorf("Unknown type for roundtrip test: %T", tt.input) + } + + assert.NoError(t, err) + }) + } +} + +func TestMarshalEdgeCases(t *testing.T) { + tests := []struct { + name string + input any + wantErr bool + }{ + { + name: "Nil map in Delegations", + input: Delegations{Keys: nil}, + wantErr: false, + }, + { + name: "Empty KeyIDs slice", + input: Role{KeyIDs: []string{}, Threshold: 1}, + wantErr: false, + }, + { + name: "Zero threshold", + input: 
Role{KeyIDs: []string{"key1"}, Threshold: 0}, + wantErr: false, + }, + { + name: "Negative threshold", + input: SuccinctRoles{KeyIDs: []string{"key1"}, Threshold: -1, BitLength: 8, NamePrefix: "prefix"}, + wantErr: false, // JSON marshaling allows negative threshold; validation happens elsewhere + }, + { + name: "Very large numbers", + input: MetaFiles{Length: 9223372036854775807, Version: 9223372036854775807}, // max int64 + wantErr: false, + }, + { + name: "Unicode in strings", + input: DelegatedRole{Name: "роль-тест-🔑", KeyIDs: []string{"🔑-keyid"}, Threshold: 1}, + wantErr: false, + }, + { + name: "Empty strings", + input: Key{Type: "", Scheme: "", Value: KeyVal{PublicKey: ""}}, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + data, err := json.Marshal(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("Marshal() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if !tt.wantErr { + // Verify it produces valid JSON + var result interface{} + err = json.Unmarshal(data, &result) + helpers.AssertNoError(t, err) + } + }) + } +} + +func TestUnmarshalErrorCases(t *testing.T) { + tests := []struct { + name string + targetType string + jsonData string + wantErr bool + }{ + { + name: "Invalid JSON", + targetType: "RootType", + jsonData: "{invalid json}", + wantErr: true, + }, + { + name: "Missing required fields", + targetType: "RootType", + jsonData: "{}", + wantErr: false, // JSON unmarshaling might use zero values + }, + { + name: "Wrong type for numeric field", + targetType: "MetaFiles", + jsonData: `{"length": "not-a-number", "version": 1}`, + wantErr: true, + }, + { + name: "Null values", + targetType: "Signature", + jsonData: `{"keyid": null, "sig": null}`, + wantErr: true, // null sig is invalid hex bytes + }, + { + name: "Array instead of object", + targetType: "Key", + jsonData: `[]`, + wantErr: true, + }, + { + name: "Nested invalid JSON", + targetType: "Delegations", + jsonData: `{"keys": {"key1": 
{"keytype": 123}}}`, // keytype should be string + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var err error + + switch tt.targetType { + case "RootType": + var result RootType + err = json.Unmarshal([]byte(tt.jsonData), &result) + case "MetaFiles": + var result MetaFiles + err = json.Unmarshal([]byte(tt.jsonData), &result) + case "Signature": + var result Signature + err = json.Unmarshal([]byte(tt.jsonData), &result) + case "Key": + var result Key + err = json.Unmarshal([]byte(tt.jsonData), &result) + case "Delegations": + var result Delegations + err = json.Unmarshal([]byte(tt.jsonData), &result) + default: + t.Fatalf("Unknown target type: %s", tt.targetType) + } + + if tt.wantErr { + assert.Error(t, err, "Expected error for invalid JSON") + } else { + assert.NoError(t, err, "Expected no error for valid JSON") + } + }) + } +} + +func TestComplexStructuresMarshaling(t *testing.T) { + fixedTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + + tests := []struct { + name string + input any + wantErr bool + }{ + { + name: "Complete RootType with all fields", + input: RootType{ + Type: "root", + SpecVersion: "1.0.31", + Version: 1, + Expires: fixedTime, + Keys: map[string]*Key{ + "key1": { + Type: "ed25519", + Scheme: "ed25519", + Value: KeyVal{PublicKey: "publickey1"}, + }, + "key2": { + Type: "rsa", + Scheme: "rsa-pss-sha256", + Value: KeyVal{PublicKey: "publickey2"}, + }, + }, + Roles: map[string]*Role{ + "root": { + KeyIDs: []string{"key1"}, + Threshold: 1, + }, + "targets": { + KeyIDs: []string{"key2"}, + Threshold: 1, + }, + }, + ConsistentSnapshot: true, + }, + wantErr: false, + }, + { + name: "TargetsType with complex targets and delegations", + input: TargetsType{ + Type: "targets", + SpecVersion: "1.0.31", + Version: 2, + Expires: fixedTime, + Targets: map[string]*TargetFiles{ + "file1.txt": { + Length: 1024, + Hashes: Hashes{ + "sha256": HexBytes("abc123"), + "sha512": HexBytes("def456"), + }, + }, + }, + 
Delegations: &Delegations{ + Keys: map[string]*Key{ + "delegate-key": { + Type: "ed25519", + Scheme: "ed25519", + Value: KeyVal{PublicKey: "delegate-public-key"}, + }, + }, + Roles: []DelegatedRole{ + { + Name: "delegate-role", + KeyIDs: []string{"delegate-key"}, + Threshold: 1, + Terminating: false, + Paths: []string{"path/*"}, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "Nested structure with empty collections", + input: TargetsType{ + Type: "targets", + SpecVersion: "1.0.31", + Version: 1, + Expires: fixedTime, + Targets: map[string]*TargetFiles{}, + Delegations: &Delegations{ + Keys: map[string]*Key{}, + Roles: []DelegatedRole{}, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test marshaling + data, err := json.Marshal(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("Marshal() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + // Verify valid JSON + var jsonData interface{} + err = json.Unmarshal(data, &jsonData) + helpers.AssertNoError(t, err) + + // Test unmarshaling back + switch tt.input.(type) { + case RootType: + var result RootType + err = json.Unmarshal(data, &result) + helpers.AssertNoError(t, err) + case TargetsType: + var result TargetsType + err = json.Unmarshal(data, &result) + helpers.AssertNoError(t, err) + } + }) + } +} diff --git a/metadata/metadata_bench_test.go b/metadata/metadata_bench_test.go new file mode 100644 index 00000000..ff51f3e6 --- /dev/null +++ b/metadata/metadata_bench_test.go @@ -0,0 +1,493 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "crypto" + "crypto/ed25519" + "crypto/rand" + "encoding/json" + mathrand "math/rand/v2" + "testing" + "time" + + "github.com/sigstore/sigstore/pkg/signature" +) + +// BenchmarkMetadataCreation benchmarks metadata creation operations +func BenchmarkMetadataCreation(b *testing.B) { + expiry := time.Now().UTC().Add(24 * time.Hour) + + benchmarks := []struct { + name string + fn func() any + }{ + { + name: "Root", + fn: func() any { return Root(expiry) }, + }, + { + name: "Targets", + fn: func() any { return Targets(expiry) }, + }, + { + name: "Snapshot", + fn: func() any { return Snapshot(expiry) }, + }, + { + name: "Timestamp", + fn: func() any { return Timestamp(expiry) }, + }, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + for b.Loop() { + _ = bm.fn() + } + }) + } +} + +// BenchmarkMetadataToBytes benchmarks serialization to bytes +func BenchmarkMetadataToBytes(b *testing.B) { + expiry := time.Now().UTC().Add(24 * time.Hour) + + root := Root(expiry) + targets := Targets(expiry) + snapshot := Snapshot(expiry) + timestamp := Timestamp(expiry) + + // Add some data to make benchmarks more realistic + root.Signed.Keys["test-key"] = &Key{ + Type: "ed25519", + Scheme: "ed25519", + Value: KeyVal{PublicKey: "test-public-key"}, + } + + targets.Signed.Targets["test-file"] = &TargetFiles{ + Length: 1024, + Hashes: Hashes{ + "sha256": HexBytes("abcdef1234567890"), + }, + } + + benchmarks := []struct { + name string + metadata func() ([]byte, error) 
+ }{ + { + name: "Root-Compact", + metadata: func() ([]byte, error) { + return root.ToBytes(true) + }, + }, + { + name: "Root-Pretty", + metadata: func() ([]byte, error) { + return root.ToBytes(false) + }, + }, + { + name: "Targets-Compact", + metadata: func() ([]byte, error) { + return targets.ToBytes(true) + }, + }, + { + name: "Targets-Pretty", + metadata: func() ([]byte, error) { + return targets.ToBytes(false) + }, + }, + { + name: "Snapshot-Compact", + metadata: func() ([]byte, error) { + return snapshot.ToBytes(true) + }, + }, + { + name: "Timestamp-Compact", + metadata: func() ([]byte, error) { + return timestamp.ToBytes(true) + }, + }, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + for b.Loop() { + _, err := bm.metadata() + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +// BenchmarkMetadataFromBytes benchmarks deserialization from bytes +func BenchmarkMetadataFromBytes(b *testing.B) { + // Pre-generate test data + expiry := time.Now().UTC().Add(24 * time.Hour) + + root := Root(expiry) + targets := Targets(expiry) + snapshot := Snapshot(expiry) + timestamp := Timestamp(expiry) + + rootData, _ := root.ToBytes(false) + targetsData, _ := targets.ToBytes(false) + snapshotData, _ := snapshot.ToBytes(false) + timestampData, _ := timestamp.ToBytes(false) + + benchmarks := []struct { + name string + data []byte + fn func([]byte) error + }{ + { + name: "Root", + data: rootData, + fn: func(data []byte) error { + _, err := Root().FromBytes(data) + return err + }, + }, + { + name: "Targets", + data: targetsData, + fn: func(data []byte) error { + _, err := Targets().FromBytes(data) + return err + }, + }, + { + name: "Snapshot", + data: snapshotData, + fn: func(data []byte) error { + _, err := Snapshot().FromBytes(data) + return err + }, + }, + { + name: "Timestamp", + data: timestampData, + fn: func(data []byte) error { + _, err := Timestamp().FromBytes(data) + return err + }, + }, + } + + for _, bm := range benchmarks { + 
b.Run(bm.name, func(b *testing.B) {
+			for b.Loop() {
+				err := bm.fn(bm.data)
+				if err != nil {
+					b.Fatal(err)
+				}
+			}
+		})
+	}
+}
+
+// BenchmarkSignatureOperations benchmarks signature creation and verification
+func BenchmarkSignatureOperations(b *testing.B) {
+	// Generate test key pair
+	pub, priv, err := ed25519.GenerateKey(rand.Reader)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	// Create test metadata
+	root := Root()
+	keyID := "test-key-id"
+	root.Signed.Keys[keyID] = &Key{
+		Type:   "ed25519",
+		Scheme: "ed25519",
+		Value:  KeyVal{PublicKey: string(pub)},
+	}
+	root.Signed.Roles[ROOT].KeyIDs = []string{keyID}
+
+	b.Run("Sign", func(b *testing.B) {
+		for b.Loop() {
+			// Create a fresh copy for each iteration
+			testRoot := Root()
+			testRoot.Signed = root.Signed
+
+			// Create signer from private key
+			signer, err := signature.LoadSigner(priv, crypto.Hash(0))
+			if err != nil {
+				b.Fatal(err)
+			}
+
+			_, err = testRoot.Sign(signer)
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+
+	// NOTE(review): dead setup — signs root once "for verification benchmarks", but no Verify sub-benchmark follows; add one or drop this block.
+	signer, err := signature.LoadSigner(priv, crypto.Hash(0))
+	if err != nil {
+		b.Fatal(err)
+	}
+	_, err = root.Sign(signer)
+	if err != nil {
+		b.Fatal(err)
+	}
+}
+
+// BenchmarkJSONOperations benchmarks raw JSON operations for comparison
+func BenchmarkJSONOperations(b *testing.B) {
+	expiry := time.Now().UTC().Add(24 * time.Hour)
+	root := Root(expiry)
+
+	// Add some complexity
+	for i := 0; i < 10; i++ {
+		keyID := generateRandomString(64)
+		root.Signed.Keys[keyID] = &Key{
+			Type:   "ed25519",
+			Scheme: "ed25519",
+			Value:  KeyVal{PublicKey: string(generateRandomBytes(32))},
+		}
+	}
+
+	data, err := json.Marshal(root)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.Run("JSON-Marshal", func(b *testing.B) {
+		for b.Loop() {
+			_, err := json.Marshal(root)
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	})
+
+	b.Run("JSON-Unmarshal", func(b *testing.B) {
+		for b.Loop() {
+			var result Metadata[RootType]
+			err := json.Unmarshal(data, &result)
+			if err != nil {
+				
b.Fatal(err) + } + } + }) +} + +// BenchmarkHexBytesOperations benchmarks HexBytes operations +func BenchmarkHexBytesOperations(b *testing.B) { + testData := [][]byte{ + []byte("small"), + make([]byte, 256), // medium + make([]byte, 8192), // large + make([]byte, 65536), // very large + } + + // Fill test data with random bytes + for _, data := range testData { + _, _ = rand.Read(data) + } + + for i, data := range testData { + size := []string{"Small", "Medium", "Large", "VeryLarge"}[i] + + b.Run("Marshal-"+size, func(b *testing.B) { + hexBytes := HexBytes(data) + for b.Loop() { + _, err := json.Marshal(hexBytes) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("Unmarshal-"+size, func(b *testing.B) { + hexBytes := HexBytes(data) + jsonData, err := json.Marshal(hexBytes) + if err != nil { + b.Fatal(err) + } + + for b.Loop() { + var result HexBytes + err := json.Unmarshal(jsonData, &result) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +// BenchmarkComplexStructures benchmarks operations on complex metadata structures +func BenchmarkComplexStructures(b *testing.B) { + // Create complex targets metadata with many files + targets := Targets() + + // Add many target files + for i := 0; i < 1000; i++ { + filename := generateRandomString(20) + ".txt" + targets.Signed.Targets[filename] = &TargetFiles{ + Length: int64(i + 1000), + Hashes: Hashes{ + "sha256": HexBytes(generateRandomString(64)), + "sha512": HexBytes(generateRandomString(128)), + }, + } + } + + // Add delegations + targets.Signed.Delegations = &Delegations{ + Keys: make(map[string]*Key), + Roles: make([]DelegatedRole, 0), + } + + for i := 0; i < 50; i++ { + keyID := generateRandomString(64) + targets.Signed.Delegations.Keys[keyID] = &Key{ + Type: "ed25519", + Scheme: "ed25519", + Value: KeyVal{PublicKey: string(generateRandomBytes(32))}, + } + + targets.Signed.Delegations.Roles = append(targets.Signed.Delegations.Roles, DelegatedRole{ + Name: generateRandomString(15), + KeyIDs: []string{keyID}, 
+ Threshold: 1, + Terminating: i%2 == 1, + Paths: []string{generateRandomString(10) + "/*"}, + }) + } + + b.Run("ComplexTargets-ToBytes", func(b *testing.B) { + for b.Loop() { + _, err := targets.ToBytes(true) + if err != nil { + b.Fatal(err) + } + } + }) + + // Pre-serialize for FromBytes benchmark + complexData, err := targets.ToBytes(true) + if err != nil { + b.Fatal(err) + } + + b.Run("ComplexTargets-FromBytes", func(b *testing.B) { + for b.Loop() { + _, err := Targets().FromBytes(complexData) + if err != nil { + b.Fatal(err) + } + } + }) +} + +// BenchmarkMemoryAllocations benchmarks memory usage patterns +func BenchmarkMemoryAllocations(b *testing.B) { + b.Run("MetadataCreation-Allocs", func(b *testing.B) { + b.ReportAllocs() + for b.Loop() { + root := Root() + _ = root + } + }) + + b.Run("Serialization-Allocs", func(b *testing.B) { + root := Root() + b.ReportAllocs() + for b.Loop() { + data, err := root.ToBytes(true) + if err != nil { + b.Fatal(err) + } + _ = data + } + }) + + b.Run("Deserialization-Allocs", func(b *testing.B) { + root := Root() + data, err := root.ToBytes(true) + if err != nil { + b.Fatal(err) + } + + b.ReportAllocs() + for b.Loop() { + _, err := Root().FromBytes(data) + if err != nil { + b.Fatal(err) + } + } + }) +} + +// BenchmarkConcurrentOperations benchmarks concurrent access patterns +func BenchmarkConcurrentOperations(b *testing.B) { + root := Root() + data, err := root.ToBytes(true) + if err != nil { + b.Fatal(err) + } + + b.Run("ConcurrentDeserialization", func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, err := Root().FromBytes(data) + if err != nil { + b.Fatal(err) + } + } + }) + }) + + b.Run("ConcurrentSerialization", func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + testRoot := Root() + _, err := testRoot.ToBytes(true) + if err != nil { + b.Fatal(err) + } + } + }) + }) +} + +// Helper functions to avoid import cycles +func generateRandomString(length int) string { + 
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + b := make([]byte, length) + for i := range b { + b[i] = charset[mathrand.IntN(len(charset))] + } + return string(b) +} + +func generateRandomBytes(length int) []byte { + b := make([]byte, length) + _, _ = rand.Read(b) + return b +} diff --git a/metadata/metadata_fuzz_test.go b/metadata/metadata_fuzz_test.go new file mode 100644 index 00000000..161ef062 --- /dev/null +++ b/metadata/metadata_fuzz_test.go @@ -0,0 +1,418 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "encoding/json" + "testing" + "time" + + "github.com/theupdateframework/go-tuf/v2/internal/testutils/helpers" +) + +// FuzzRootFromBytes tests Root metadata parsing with random input +func FuzzRootFromBytes(f *testing.F) { + // Add seed corpus + root := Root() + validData, _ := root.ToBytes(false) + f.Add(validData) + + // Add some edge cases + f.Add([]byte("")) + f.Add([]byte("{}")) + f.Add([]byte(`{"signed": {"_type": "root"}}`)) + f.Add([]byte(`{"signed": {"_type": "wrong"}, "signatures": []}`)) + + generator := helpers.NewFuzzDataGenerator(uint64(time.Now().UnixNano()), 0) + + // Add corrupted metadata + for i := 0; i < 5; i++ { + f.Add(generator.CreateFuzzTestMetadata("root")) + f.Add(generator.GenerateCorruptedJSON()) + } + + f.Fuzz(func(t *testing.T, data []byte) { + defer func() { + if r := recover(); r != nil { + t.Errorf("Root().FromBytes panicked with input %q: %v", string(data), r) + } + }() + + _, err := Root().FromBytes(data) + // Errors are expected and acceptable for invalid input + _ = err + }) +} + +// FuzzTargetsFromBytes tests Targets metadata parsing with random input +func FuzzTargetsFromBytes(f *testing.F) { + // Add seed corpus + targets := Targets() + validData, _ := targets.ToBytes(false) + f.Add(validData) + + f.Add([]byte("")) + f.Add([]byte("{}")) + f.Add([]byte(`{"signed": {"_type": "targets"}}`)) + + generator := helpers.NewFuzzDataGenerator(uint64(time.Now().UnixNano()), 0) + + for i := 0; i < 5; i++ { + f.Add(generator.CreateFuzzTestMetadata("targets")) + f.Add(generator.GenerateCorruptedJSON()) + } + + f.Fuzz(func(t *testing.T, data []byte) { + defer func() { + if r := recover(); r != nil { + t.Errorf("Targets().FromBytes panicked with input %q: %v", string(data), r) + } + }() + + _, err := Targets().FromBytes(data) + _ = err + }) +} + +// 
FuzzSnapshotFromBytes tests Snapshot metadata parsing with random input +func FuzzSnapshotFromBytes(f *testing.F) { + snapshot := Snapshot() + validData, _ := snapshot.ToBytes(false) + f.Add(validData) + + f.Add([]byte("")) + f.Add([]byte("{}")) + f.Add([]byte(`{"signed": {"_type": "snapshot"}}`)) + + generator := helpers.NewFuzzDataGenerator(uint64(time.Now().UnixNano()), 0) + + for i := 0; i < 5; i++ { + f.Add(generator.CreateFuzzTestMetadata("snapshot")) + f.Add(generator.GenerateCorruptedJSON()) + } + + f.Fuzz(func(t *testing.T, data []byte) { + defer func() { + if r := recover(); r != nil { + t.Errorf("Snapshot().FromBytes panicked with input %q: %v", string(data), r) + } + }() + + _, err := Snapshot().FromBytes(data) + _ = err + }) +} + +// FuzzTimestampFromBytes tests Timestamp metadata parsing with random input +func FuzzTimestampFromBytes(f *testing.F) { + timestamp := Timestamp() + validData, _ := timestamp.ToBytes(false) + f.Add(validData) + + f.Add([]byte("")) + f.Add([]byte("{}")) + f.Add([]byte(`{"signed": {"_type": "timestamp"}}`)) + + generator := helpers.NewFuzzDataGenerator(uint64(time.Now().UnixNano()), 0) + + for i := 0; i < 5; i++ { + f.Add(generator.CreateFuzzTestMetadata("timestamp")) + f.Add(generator.GenerateCorruptedJSON()) + } + + f.Fuzz(func(t *testing.T, data []byte) { + defer func() { + if r := recover(); r != nil { + t.Errorf("Timestamp().FromBytes panicked with input %q: %v", string(data), r) + } + }() + + _, err := Timestamp().FromBytes(data) + _ = err + }) +} + +// FuzzMetadataToBytes tests metadata serialization +func FuzzMetadataToBytes(f *testing.F) { + // Add some variations + f.Add(int64(1), true) // version, compact + f.Add(int64(999999), false) + f.Add(int64(0), true) + f.Add(int64(-1), false) + + f.Fuzz(func(t *testing.T, version int64, compact bool) { + defer func() { + if r := recover(); r != nil { + t.Errorf("ToBytes panicked with version %d, compact %t: %v", version, compact, r) + } + }() + + // Test each metadata type 
+ testCases := []func(){ + func() { + root := Root() + root.Signed.Version = version + _, _ = root.ToBytes(compact) + }, + func() { + targets := Targets() + targets.Signed.Version = version + _, _ = targets.ToBytes(compact) + }, + func() { + snapshot := Snapshot() + snapshot.Signed.Version = version + _, _ = snapshot.ToBytes(compact) + }, + func() { + timestamp := Timestamp() + timestamp.Signed.Version = version + _, _ = timestamp.ToBytes(compact) + }, + } + + for _, testFunc := range testCases { + testFunc() + } + }) +} + +// FuzzJSONMarshaling tests JSON marshaling of metadata structures +func FuzzJSONMarshaling(f *testing.F) { + generator := helpers.NewFuzzDataGenerator(uint64(time.Now().UnixNano()), 0) + + // Add valid JSON samples + f.Add([]byte(`{"test": "value"}`)) + f.Add([]byte(`{}`)) + f.Add([]byte(`[]`)) + f.Add(helpers.BuildRootJSON()) + + // Add some random data + for i := 0; i < 5; i++ { + f.Add(generator.GenerateRandomJSON()) + f.Add(generator.GenerateCorruptedJSON()) + } + + f.Fuzz(func(t *testing.T, data []byte) { + defer func() { + if r := recover(); r != nil { + t.Errorf("JSON marshaling panicked with input %q: %v", string(data), r) + } + }() + + var v interface{} + err := json.Unmarshal(data, &v) + if err != nil { + // Invalid JSON is expected, not an error + return + } + + // If we can unmarshal, we should be able to marshal back + _, err = json.Marshal(v) + if err != nil { + t.Errorf("failed to marshal back after unmarshal: %v", err) + } + }) +} + +// FuzzHexBytes tests HexBytes marshaling/unmarshaling +func FuzzHexBytes(f *testing.F) { + // Add seed data + f.Add([]byte("")) + f.Add([]byte("test")) + f.Add([]byte("0123456789abcdef")) + f.Add([]byte{0, 1, 2, 3, 255}) + + generator := helpers.NewFuzzDataGenerator(uint64(time.Now().UnixNano()), 0) + for i := 0; i < 10; i++ { + f.Add(generator.GenerateRandomBytes(100)) + } + + f.Fuzz(func(t *testing.T, data []byte) { + defer func() { + if r := recover(); r != nil { + t.Errorf("HexBytes operations 
panicked with input %v: %v", data, r) + } + }() + + hexBytes := HexBytes(data) + + // Test JSON marshaling + jsonData, err := json.Marshal(hexBytes) + if err != nil { + return // Some data might not be marshalable + } + + // Test JSON unmarshaling + var unmarshaled HexBytes + err = json.Unmarshal(jsonData, &unmarshaled) + if err != nil { + return // Some JSON might not be unmarshalable + } + + // If both operations succeeded, the data should be the same + if len(data) > 0 && string(hexBytes) != string(unmarshaled) { + t.Errorf("HexBytes roundtrip failed: original %v, got %v", hexBytes, unmarshaled) + } + }) +} + +// FuzzMetadataFieldsValidation tests validation of metadata fields +func FuzzMetadataFieldsValidation(f *testing.F) { + generator := helpers.NewFuzzDataGenerator(uint64(time.Now().UnixNano()), 0) + + // Add seed test cases + f.Add("root", int64(1), "1.0.31") + f.Add("targets", int64(999), "1.0.0") + f.Add("snapshot", int64(0), "") + f.Add("timestamp", int64(-1), "invalid") + + // Add random cases + for i := 0; i < 5; i++ { + f.Add( + generator.GenerateRandomString(20), + int64(generator.GenerateRandomInt(1000000)-500000), + generator.GenerateRandomString(10), + ) + } + + f.Fuzz(func(t *testing.T, metadataType string, version int64, specVersion string) { + defer func() { + if r := recover(); r != nil { + t.Errorf("Metadata field validation panicked with type=%s, version=%d, spec=%s: %v", + metadataType, version, specVersion, r) + } + }() + + // Create metadata structure with fuzz data + metadata := map[string]interface{}{ + "signed": map[string]interface{}{ + "_type": metadataType, + "version": version, + "spec_version": specVersion, + "expires": time.Now().Add(24 * time.Hour).Format(time.RFC3339), + }, + "signatures": []interface{}{}, + } + + jsonData, err := json.Marshal(metadata) + if err != nil { + return + } + + // Test parsing with each metadata type + _, _ = Root().FromBytes(jsonData) + _, _ = Targets().FromBytes(jsonData) + _, _ = 
Snapshot().FromBytes(jsonData) + _, _ = Timestamp().FromBytes(jsonData) + }) +} + +// FuzzSignatureOperations tests signature-related operations +func FuzzSignatureOperations(f *testing.F) { + generator := helpers.NewFuzzDataGenerator(uint64(time.Now().UnixNano()), 0) + + // Add seed data + f.Add("valid-keyid", []byte("signature-data")) + f.Add("", []byte("")) + f.Add("very-long-"+generator.GenerateRandomString(1000), generator.GenerateRandomBytes(1000)) + + for i := 0; i < 5; i++ { + f.Add( + generator.GenerateRandomString(64), + generator.GenerateRandomBytes(128), + ) + } + + f.Fuzz(func(t *testing.T, keyID string, sigData []byte) { + defer func() { + if r := recover(); r != nil { + t.Errorf("Signature operations panicked with keyID=%s, sigData len=%d: %v", + keyID, len(sigData), r) + } + }() + + // Create signature structure + sig := Signature{ + KeyID: keyID, + Signature: HexBytes(sigData), + } + + // Test JSON marshaling + jsonData, err := json.Marshal(sig) + if err != nil { + return + } + + // Test JSON unmarshaling + var unmarshaled Signature + err = json.Unmarshal(jsonData, &unmarshaled) + if err != nil { + return + } + + // Test adding to metadata + root := Root() + root.Signatures = append(root.Signatures, sig) + + // Test serialization + _, _ = root.ToBytes(false) + }) +} + +// FuzzCompleteMetadataStructure tests complete metadata structure with random data +func FuzzCompleteMetadataStructure(f *testing.F) { + generator := helpers.NewFuzzDataGenerator(uint64(time.Now().UnixNano()), 0) + + // Add seed data for complete metadata structures + for _, metadataType := range []string{"root", "targets", "snapshot", "timestamp"} { + f.Add(generator.CreateFuzzTestMetadata(metadataType)) + } + + // Add some edge cases + f.Add([]byte(`{"signed": {}, "signatures": []}`)) + f.Add([]byte(`{"signed": {"_type": "root", "version": 999999999}, "signatures": []}`)) + + f.Fuzz(func(t *testing.T, data []byte) { + defer func() { + if r := recover(); r != nil { + 
t.Errorf("Complete metadata structure test panicked with input len=%d: %v", + len(data), r) + } + }() + + // Test parsing with all metadata types + metadataTypes := []func() interface{}{ + func() interface{} { m, _ := Root().FromBytes(data); return m }, + func() interface{} { m, _ := Targets().FromBytes(data); return m }, + func() interface{} { m, _ := Snapshot().FromBytes(data); return m }, + func() interface{} { m, _ := Timestamp().FromBytes(data); return m }, + } + + for _, parseFunc := range metadataTypes { + parseFunc() + } + + // Test if it's valid JSON at all + var v interface{} + if err := json.Unmarshal(data, &v); err == nil { + // If it's valid JSON, test re-marshaling + _, _ = json.Marshal(v) + } + }) +} diff --git a/metadata/metadata_table_test.go b/metadata/metadata_table_test.go new file mode 100644 index 00000000..41370dbd --- /dev/null +++ b/metadata/metadata_table_test.go @@ -0,0 +1,492 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package metadata + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/theupdateframework/go-tuf/v2/internal/testutils/helpers" +) + +func TestMetadataCreation(t *testing.T) { + fixedExpire := time.Date(2030, 8, 15, 14, 30, 45, 100, time.UTC) + + tests := []struct { + name string + createFunc func() any + expectedType string + }{ + { + name: "Root creation with default expiry", + createFunc: func() any { return Root() }, + expectedType: ROOT, + }, + { + name: "Root creation with fixed expiry", + createFunc: func() any { return Root(fixedExpire) }, + expectedType: ROOT, + }, + { + name: "Targets creation with default expiry", + createFunc: func() any { return Targets() }, + expectedType: TARGETS, + }, + { + name: "Targets creation with fixed expiry", + createFunc: func() any { return Targets(fixedExpire) }, + expectedType: TARGETS, + }, + { + name: "Snapshot creation with default expiry", + createFunc: func() any { return Snapshot() }, + expectedType: SNAPSHOT, + }, + { + name: "Snapshot creation with fixed expiry", + createFunc: func() any { return Snapshot(fixedExpire) }, + expectedType: SNAPSHOT, + }, + { + name: "Timestamp creation with default expiry", + createFunc: func() any { return Timestamp() }, + expectedType: TIMESTAMP, + }, + { + name: "Timestamp creation with fixed expiry", + createFunc: func() any { return Timestamp(fixedExpire) }, + expectedType: TIMESTAMP, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.createFunc() + assert.NotNil(t, result) + + switch meta := result.(type) { + case *Metadata[RootType]: + assert.Equal(t, tt.expectedType, meta.Signed.Type) + assert.Equal(t, SPECIFICATION_VERSION, meta.Signed.SpecVersion) + assert.Equal(t, int64(1), meta.Signed.Version) + 
assert.NotNil(t, meta.Signed.Keys) + assert.NotNil(t, meta.Signed.Roles) + case *Metadata[TargetsType]: + assert.Equal(t, tt.expectedType, meta.Signed.Type) + assert.Equal(t, SPECIFICATION_VERSION, meta.Signed.SpecVersion) + assert.Equal(t, int64(1), meta.Signed.Version) + assert.NotNil(t, meta.Signed.Targets) + case *Metadata[SnapshotType]: + assert.Equal(t, tt.expectedType, meta.Signed.Type) + assert.Equal(t, SPECIFICATION_VERSION, meta.Signed.SpecVersion) + assert.Equal(t, int64(1), meta.Signed.Version) + assert.NotNil(t, meta.Signed.Meta) + case *Metadata[TimestampType]: + assert.Equal(t, tt.expectedType, meta.Signed.Type) + assert.Equal(t, SPECIFICATION_VERSION, meta.Signed.SpecVersion) + assert.Equal(t, int64(1), meta.Signed.Version) + assert.NotNil(t, meta.Signed.Meta) + } + }) + } +} + +func TestMetadataFromBytes(t *testing.T) { + validRoot := helpers.CreateTestRootJSON(t) + validTargets := helpers.CreateTestTargetsJSON(t) + validSnapshot := helpers.CreateTestSnapshotJSON(t) + validTimestamp := helpers.CreateTestTimestampJSON(t) + invalidData := helpers.CreateInvalidJSON() + + tests := []struct { + name string + metadataType string + data []byte + wantErr bool + errorMsg string + }{ + { + name: "Valid Root from bytes", + metadataType: ROOT, + data: validRoot, + }, + { + name: "Valid Targets from bytes", + metadataType: TARGETS, + data: validTargets, + }, + { + name: "Valid Snapshot from bytes", + metadataType: SNAPSHOT, + data: validSnapshot, + }, + { + name: "Valid Timestamp from bytes", + metadataType: TIMESTAMP, + data: validTimestamp, + }, + { + name: "Empty data", + metadataType: ROOT, + data: invalidData["empty"], + wantErr: true, + errorMsg: "unexpected end of JSON input", + }, + { + name: "Invalid JSON", + metadataType: ROOT, + data: invalidData["invalid_json"], + wantErr: true, + errorMsg: "invalid character", + }, + { + name: "Missing signed field", + metadataType: ROOT, + data: invalidData["missing_signed"], + wantErr: true, + }, + { + name: 
"Wrong metadata type", + metadataType: ROOT, + data: invalidData["wrong_type"], + wantErr: true, + errorMsg: "expected metadata type root", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var err error + switch tt.metadataType { + case ROOT: + _, err = Root().FromBytes(tt.data) + case TARGETS: + _, err = Targets().FromBytes(tt.data) + case SNAPSHOT: + _, err = Snapshot().FromBytes(tt.data) + case TIMESTAMP: + _, err = Timestamp().FromBytes(tt.data) + } + + if tt.wantErr { + assert.Error(t, err) + if tt.errorMsg != "" { + assert.Contains(t, err.Error(), tt.errorMsg) + } + return + } + assert.NoError(t, err) + }) + } +} + +func TestMetadataFromFile(t *testing.T) { + testDir := t.TempDir() + + validRoot := helpers.CreateTestRootJSON(t) + validTargets := helpers.CreateTestTargetsJSON(t) + + rootFile := helpers.WriteTestFile(t, testDir, "root.json", validRoot) + targetsFile := helpers.WriteTestFile(t, testDir, "targets.json", validTargets) + helpers.WriteTestFile(t, testDir, "invalid.json", []byte("{invalid json}")) + + tests := []struct { + name string + metadataType string + filePath string + wantErr bool + errorMsg string + }{ + { + name: "Valid Root from file", + metadataType: ROOT, + filePath: rootFile, + }, + { + name: "Valid Targets from file", + metadataType: TARGETS, + filePath: targetsFile, + }, + { + name: "Non-existent file", + metadataType: ROOT, + filePath: filepath.Join(testDir, "nonexistent.json"), + wantErr: true, + errorMsg: "no such file or directory", + }, + { + name: "Invalid JSON file", + metadataType: ROOT, + filePath: filepath.Join(testDir, "invalid.json"), + wantErr: true, + errorMsg: "invalid character", + }, + { + name: "Wrong metadata type in file", + metadataType: TARGETS, + filePath: rootFile, + wantErr: true, + errorMsg: "expected metadata type targets", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var err error + switch tt.metadataType { + case ROOT: + _, err = 
Root().FromFile(tt.filePath) + case TARGETS: + _, err = Targets().FromFile(tt.filePath) + case SNAPSHOT: + _, err = Snapshot().FromFile(tt.filePath) + case TIMESTAMP: + _, err = Timestamp().FromFile(tt.filePath) + } + + if tt.wantErr { + assert.Error(t, err) + if tt.errorMsg != "" { + assert.Contains(t, err.Error(), tt.errorMsg) + } + return + } + assert.NoError(t, err) + }) + } +} + +func TestMetadataToBytes(t *testing.T) { + expiry := time.Now().UTC().Add(24 * time.Hour) + + tests := []struct { + name string + metadata any + compact bool + wantErr bool + }{ + {name: "Root to bytes compact", metadata: Root(expiry), compact: true}, + {name: "Root to bytes non-compact", metadata: Root(expiry), compact: false}, + {name: "Targets to bytes", metadata: Targets(expiry), compact: true}, + {name: "Snapshot to bytes", metadata: Snapshot(expiry), compact: true}, + {name: "Timestamp to bytes", metadata: Timestamp(expiry), compact: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + data []byte + err error + ) + switch meta := tt.metadata.(type) { + case *Metadata[RootType]: + data, err = meta.ToBytes(tt.compact) + case *Metadata[TargetsType]: + data, err = meta.ToBytes(tt.compact) + case *Metadata[SnapshotType]: + data, err = meta.ToBytes(tt.compact) + case *Metadata[TimestampType]: + data, err = meta.ToBytes(tt.compact) + } + + if tt.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.NotEmpty(t, data) + + // Verify the output is valid JSON. 
+ var jsonData any + assert.NoError(t, json.Unmarshal(data, &jsonData)) + }) + } +} + +func TestMetadataToFile(t *testing.T) { + testDir := t.TempDir() + expiry := time.Now().UTC().Add(24 * time.Hour) + + tests := []struct { + name string + metadata any + filename string + compact bool + wantErr bool + }{ + {name: "Root to file", metadata: Root(expiry), filename: "root.json"}, + {name: "Targets to file compact", metadata: Targets(expiry), filename: "targets.json", compact: true}, + {name: "Snapshot to file", metadata: Snapshot(expiry), filename: "snapshot.json"}, + {name: "Timestamp to file", metadata: Timestamp(expiry), filename: "timestamp.json"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + path := filepath.Join(testDir, tt.filename) + var err error + switch meta := tt.metadata.(type) { + case *Metadata[RootType]: + err = meta.ToFile(path, tt.compact) + case *Metadata[TargetsType]: + err = meta.ToFile(path, tt.compact) + case *Metadata[SnapshotType]: + err = meta.ToFile(path, tt.compact) + case *Metadata[TimestampType]: + err = meta.ToFile(path, tt.compact) + } + + if tt.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + // Verify the file contains valid JSON. 
+ raw, err := os.ReadFile(path) + assert.NoError(t, err) + var jsonData any + assert.NoError(t, json.Unmarshal(raw, &jsonData)) + }) + } +} + +func TestMetadataRoundTrip(t *testing.T) { + testDir := t.TempDir() + expiry := time.Now().UTC().Add(24 * time.Hour) + + tests := []struct { + name string + metadata any + filename string + }{ + {name: "Root roundtrip", metadata: Root(expiry), filename: "root.json"}, + {name: "Targets roundtrip", metadata: Targets(expiry), filename: "targets.json"}, + {name: "Snapshot roundtrip", metadata: Snapshot(expiry), filename: "snapshot.json"}, + {name: "Timestamp roundtrip", metadata: Timestamp(expiry), filename: "timestamp.json"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + path := filepath.Join(testDir, tt.filename) + + switch meta := tt.metadata.(type) { + case *Metadata[RootType]: + assert.NoError(t, meta.ToFile(path, false)) + loaded, err := Root().FromFile(path) + assert.NoError(t, err) + assert.Equal(t, meta.Signed.Type, loaded.Signed.Type) + assert.Equal(t, meta.Signed.Version, loaded.Signed.Version) + assert.Equal(t, meta.Signed.SpecVersion, loaded.Signed.SpecVersion) + + case *Metadata[TargetsType]: + assert.NoError(t, meta.ToFile(path, false)) + loaded, err := Targets().FromFile(path) + assert.NoError(t, err) + assert.Equal(t, meta.Signed.Type, loaded.Signed.Type) + assert.Equal(t, meta.Signed.Version, loaded.Signed.Version) + assert.Equal(t, meta.Signed.SpecVersion, loaded.Signed.SpecVersion) + + case *Metadata[SnapshotType]: + assert.NoError(t, meta.ToFile(path, false)) + loaded, err := Snapshot().FromFile(path) + assert.NoError(t, err) + assert.Equal(t, meta.Signed.Type, loaded.Signed.Type) + assert.Equal(t, meta.Signed.Version, loaded.Signed.Version) + assert.Equal(t, meta.Signed.SpecVersion, loaded.Signed.SpecVersion) + + case *Metadata[TimestampType]: + assert.NoError(t, meta.ToFile(path, false)) + loaded, err := Timestamp().FromFile(path) + assert.NoError(t, err) + assert.Equal(t, 
meta.Signed.Type, loaded.Signed.Type) + assert.Equal(t, meta.Signed.Version, loaded.Signed.Version) + assert.Equal(t, meta.Signed.SpecVersion, loaded.Signed.SpecVersion) + } + }) + } +} + +func TestMetadataVersioning(t *testing.T) { + expiry := time.Now().UTC().Add(24 * time.Hour) + + tests := []struct { + name string + metadata any + newVersion int64 + }{ + {name: "Increment Root version", metadata: Root(expiry), newVersion: 2}, + {name: "Set high version number", metadata: Targets(expiry), newVersion: 1_000_000}, + // Version 0 is below the valid minimum but the library permits setting it + // directly; enforcement happens at validation/update time. + {name: "Zero version (below minimum)", metadata: Snapshot(expiry), newVersion: 0}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + switch meta := tt.metadata.(type) { + case *Metadata[RootType]: + meta.Signed.Version = tt.newVersion + assert.Equal(t, tt.newVersion, meta.Signed.Version) + case *Metadata[TargetsType]: + meta.Signed.Version = tt.newVersion + assert.Equal(t, tt.newVersion, meta.Signed.Version) + case *Metadata[SnapshotType]: + meta.Signed.Version = tt.newVersion + assert.Equal(t, tt.newVersion, meta.Signed.Version) + case *Metadata[TimestampType]: + meta.Signed.Version = tt.newVersion + assert.Equal(t, tt.newVersion, meta.Signed.Version) + } + }) + } +} + +func TestMetadataExpiration(t *testing.T) { + now := time.Now().UTC() + past := now.Add(-24 * time.Hour) + future := now.Add(24 * time.Hour) + + tests := []struct { + name string + metadata any + expires time.Time + isExpired bool + }{ + {name: "Root not expired", metadata: Root(future), expires: future, isExpired: false}, + {name: "Root expired", metadata: Root(past), expires: past, isExpired: true}, + {name: "Targets not expired", metadata: Targets(future), expires: future, isExpired: false}, + {name: "Targets expired", metadata: Targets(past), expires: past, isExpired: true}, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + switch meta := tt.metadata.(type) { + case *Metadata[RootType]: + assert.Equal(t, tt.expires.Truncate(time.Second), meta.Signed.Expires.Truncate(time.Second)) + assert.Equal(t, tt.isExpired, meta.Signed.Expires.Before(now)) + case *Metadata[TargetsType]: + assert.Equal(t, tt.expires.Truncate(time.Second), meta.Signed.Expires.Truncate(time.Second)) + assert.Equal(t, tt.isExpired, meta.Signed.Expires.Before(now)) + } + }) + } +} diff --git a/metadata/multirepo/multirepo.go b/metadata/multirepo/multirepo.go index e48cfaf8..a36fd605 100644 --- a/metadata/multirepo/multirepo.go +++ b/metadata/multirepo/multirepo.go @@ -31,9 +31,15 @@ import ( "github.com/theupdateframework/go-tuf/v2/metadata/updater" ) -// ErrInvalidRepoName is returned when a repository name contains path traversal -// components or is otherwise invalid for use as a directory name. -var ErrInvalidRepoName = errors.New("invalid repository name") +var ( + // ErrNoMapFile is returned when no map file is provided + ErrNoMapFile = errors.New("no map file provided") + // ErrNoTrustedRoots is returned when no trusted root metadata is provided + ErrNoTrustedRoots = errors.New("no trusted root metadata provided") + // ErrInvalidRepoName is returned when a repository name contains path traversal + // components or is otherwise invalid for use as a directory name. + ErrInvalidRepoName = errors.New("invalid repository name") +) // validRepoNamePattern defines the allowed characters for repository names. 
// Names must start with an alphanumeric character and may contain alphanumeric @@ -84,13 +90,13 @@ type targetMatch struct { func NewConfig(repoMap []byte, roots map[string][]byte) (*MultiRepoConfig, error) { // error if we don't have the necessary arguments if len(repoMap) == 0 || len(roots) == 0 { - return nil, fmt.Errorf("failed to create multi-repository config: no map file and/or trusted root metadata is provided") + return nil, fmt.Errorf("failed to create multi-repository config: %w and/or %w", ErrNoMapFile, ErrNoTrustedRoots) } // unmarshal the map file (note: should we expect/support unrecognized values here?) var mapFile *MultiRepoMapType if err := json.Unmarshal(repoMap, &mapFile); err != nil { - return nil, err + return nil, fmt.Errorf("failed to unmarshal map file: %w", err) } // make sure we have enough trusted root metadata files provided based on the repository list @@ -98,7 +104,7 @@ func NewConfig(repoMap []byte, roots map[string][]byte) (*MultiRepoConfig, error // check if we have a trusted root metadata for this repository _, ok := roots[repo] if !ok { - return nil, fmt.Errorf("no trusted root metadata provided for repository - %s", repo) + return nil, fmt.Errorf("%w for repository - %s", ErrNoTrustedRoots, repo) } } diff --git a/metadata/multirepo/multirepo_test.go b/metadata/multirepo/multirepo_test.go index 223674c0..81bfb763 100644 --- a/metadata/multirepo/multirepo_test.go +++ b/metadata/multirepo/multirepo_test.go @@ -20,8 +20,79 @@ package multirepo import ( "errors" "testing" + + "github.com/stretchr/testify/assert" ) +func TestNewConfig(t *testing.T) { + validMapJSON := []byte(`{ + "repositories": { + "test-repo": ["https://example.com/repo"] + }, + "mapping": [] + }`) + + tests := []struct { + name string + desc string + repoMap []byte + roots map[string][]byte + wantErr bool + }{ + { + name: "empty map file returns error", + desc: "Creating config with empty map file should fail", + repoMap: []byte(""), + roots: map[string][]byte{}, 
+ wantErr: true, + }, + { + name: "empty roots returns error", + desc: "Creating config with empty roots should fail", + repoMap: validMapJSON, + roots: map[string][]byte{}, + wantErr: true, + }, + { + name: "valid config succeeds", + desc: "Creating config with valid map and roots should succeed", + repoMap: validMapJSON, + roots: map[string][]byte{"test-repo": []byte(`{"signatures":[],"signed":{}}`)}, + wantErr: false, + }, + { + name: "missing root for repo returns error", + desc: "Creating config with missing root metadata for a repository should fail", + repoMap: validMapJSON, + roots: map[string][]byte{"other-repo": []byte(`{"signatures":[],"signed":{}}`)}, + wantErr: true, + }, + { + name: "invalid JSON map file returns error", + desc: "Creating config with invalid JSON should fail", + repoMap: []byte(`{invalid json}`), + roots: map[string][]byte{"test-repo": []byte(`{"signatures":[],"signed":{}}`)}, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Logf("Desc: %s", tt.desc) + + cfg, err := NewConfig(tt.repoMap, tt.roots) + + if tt.wantErr { + assert.Error(t, err, "expected error but got none") + return + } + + assert.NoError(t, err, "expected no error but got %v", err) + assert.NotNil(t, cfg, "expected config to be non-nil") + }) + } +} + func TestValidateRepoName(t *testing.T) { tests := []struct { name string @@ -122,4 +193,4 @@ func TestNewRejectsInvalidRepoNames(t *testing.T) { } }) } -} \ No newline at end of file +} diff --git a/metadata/updater/updater_table_test.go b/metadata/updater/updater_table_test.go new file mode 100644 index 00000000..3b48e82f --- /dev/null +++ b/metadata/updater/updater_table_test.go @@ -0,0 +1,627 @@ +// Copyright 2024 The Update Framework Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License +// +// SPDX-License-Identifier: Apache-2.0 +// + +package updater + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/theupdateframework/go-tuf/v2/internal/testutils/simulator" + "github.com/theupdateframework/go-tuf/v2/metadata" +) + +// createAndRefresh creates an updater for the test repository and runs Refresh +func createAndRefresh(t *testing.T, repo *simulator.TestRepository) (*Updater, error) { + t.Helper() + cfg, err := repo.GetUpdaterConfig() + if err != nil { + return nil, err + } + up, err := New(cfg) + if err != nil { + return nil, err + } + return up, up.Refresh() +} + +// TestRootUpdatesTable tests root metadata update scenarios using table-driven tests +func TestRootUpdatesTable(t *testing.T) { + tests := []struct { + name string + setup func(t *testing.T, repo *simulator.TestRepository) + wantErr bool + wantErrType error + wantErrMsg string + assert func(t *testing.T, repo *simulator.TestRepository) + }{ + { + name: "first time refresh succeeds", + setup: func(t *testing.T, repo *simulator.TestRepository) { + repo.BumpVersion(metadata.ROOT) + }, + wantErr: false, + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist(metadata.TOP_LEVEL_ROLE_NAMES[:]) + }, + }, + { + name: "trusted root expired fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + repo.SetExpired(metadata.ROOT) + repo.BumpVersion(metadata.ROOT) + }, + wantErr: true, + wantErrType: &metadata.ErrExpiredMetadata{}, + wantErrMsg: "final root.json is expired", + assert: func(t 
*testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT}) + }, + }, + { + name: "new root same version fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + // Publishing without bumping version causes same version error + repo.PublishRoot() + }, + wantErr: true, + wantErrType: &metadata.ErrBadVersionNumber{}, + wantErrMsg: "bad version number, expected 2, got 1", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT}) + }, + }, + { + name: "new root non-consecutive version fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + repo.Simulator.MDRoot.Signed.Version += 2 + repo.PublishRoot() + }, + wantErr: true, + wantErrType: &metadata.ErrBadVersionNumber{}, + wantErrMsg: "bad version number, expected 2, got 3", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT}) + }, + }, + { + name: "intermediate root expired succeeds if final is valid", + setup: func(t *testing.T, repo *simulator.TestRepository) { + // Intermediate root v2 is expired + repo.SetExpired(metadata.ROOT) + repo.BumpVersion(metadata.ROOT) + // Final root v3 is up to date + repo.Simulator.MDRoot.Signed.Expires = repo.Simulator.SafeExpiry + repo.BumpVersion(metadata.ROOT) + }, + wantErr: false, + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist(metadata.TOP_LEVEL_ROLE_NAMES[:]) + repo.AssertVersionEquals(metadata.ROOT, 3) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + // Create fresh test repository + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + // Run setup + if tc.setup != nil { + tc.setup(t, repo) + } + + // Run refresh + _, err := createAndRefresh(t, repo) + + // Check error expectations + if tc.wantErr { + assert.Error(t, err) + if tc.wantErrType != nil { + assert.ErrorIs(t, err, tc.wantErrType) + } + if tc.wantErrMsg != 
"" { + assert.ErrorContains(t, err, tc.wantErrMsg) + } + } else { + assert.NoError(t, err) + } + + // Run additional assertions + if tc.assert != nil { + tc.assert(t, repo) + } + }) + } +} + +// TestTimestampUpdatesTable tests timestamp metadata update scenarios +func TestTimestampUpdatesTable(t *testing.T) { + tests := []struct { + name string + setup func(t *testing.T, repo *simulator.TestRepository) + wantErr bool + wantErrType error + wantErrMsg string + assert func(t *testing.T, repo *simulator.TestRepository) + }{ + { + name: "new timestamp unsigned fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + repo.RemoveSigners(metadata.TIMESTAMP) + }, + wantErr: true, + wantErrType: &metadata.ErrUnsignedMetadata{}, + wantErrMsg: "Verifying timestamp failed, not enough signatures, got 0, want 1", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT}) + }, + }, + { + name: "new timestamp expired fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + repo.SetExpired(metadata.TIMESTAMP) + repo.UpdateTimestamp() + }, + wantErr: true, + wantErrType: &metadata.ErrExpiredMetadata{}, + wantErrMsg: "timestamp.json is expired", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT}) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + if tc.setup != nil { + tc.setup(t, repo) + } + + _, err := createAndRefresh(t, repo) + + if tc.wantErr { + assert.Error(t, err) + if tc.wantErrType != nil { + assert.ErrorIs(t, err, tc.wantErrType) + } + if tc.wantErrMsg != "" { + assert.ErrorContains(t, err, tc.wantErrMsg) + } + } else { + assert.NoError(t, err) + } + + if tc.assert != nil { + tc.assert(t, repo) + } + }) + } +} + +// TestSnapshotUpdatesTable tests snapshot metadata update scenarios +func TestSnapshotUpdatesTable(t *testing.T) { + tests 
:= []struct { + name string + setup func(t *testing.T, repo *simulator.TestRepository) + wantErr bool + wantErrType error + wantErrMsg string + assert func(t *testing.T, repo *simulator.TestRepository) + }{ + { + name: "new snapshot unsigned fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + repo.RemoveSigners(metadata.SNAPSHOT) + }, + wantErr: true, + wantErrType: &metadata.ErrUnsignedMetadata{}, + wantErrMsg: "Verifying snapshot failed, not enough signatures, got 0, want 1", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT, metadata.TIMESTAMP}) + }, + }, + { + name: "new snapshot expired fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + repo.SetExpired(metadata.SNAPSHOT) + repo.UpdateSnapshot() + }, + wantErr: true, + wantErrType: &metadata.ErrExpiredMetadata{}, + wantErrMsg: "snapshot.json is expired", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT}) + }, + }, + { + name: "new snapshot version mismatch fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + // Increase snapshot version without updating timestamp + repo.Simulator.MDSnapshot.Signed.Version++ + }, + wantErr: true, + wantErrType: &metadata.ErrBadVersionNumber{}, + wantErrMsg: "expected 1, got 2", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT, metadata.TIMESTAMP}) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + if tc.setup != nil { + tc.setup(t, repo) + } + + _, err := createAndRefresh(t, repo) + + if tc.wantErr { + assert.Error(t, err) + if tc.wantErrType != nil { + assert.ErrorIs(t, err, tc.wantErrType) + } + if tc.wantErrMsg != "" { + assert.ErrorContains(t, err, tc.wantErrMsg) + } + } else { + assert.NoError(t, err) + } + + if tc.assert != nil { + 
tc.assert(t, repo) + } + }) + } +} + +// TestTargetsUpdatesTable tests targets metadata update scenarios +func TestTargetsUpdatesTable(t *testing.T) { + tests := []struct { + name string + setup func(t *testing.T, repo *simulator.TestRepository) + wantErr bool + wantErrType error + wantErrMsg string + assert func(t *testing.T, repo *simulator.TestRepository) + }{ + { + name: "new targets unsigned fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + repo.RemoveSigners(metadata.TARGETS) + }, + wantErr: true, + wantErrType: &metadata.ErrUnsignedMetadata{}, + wantErrMsg: "Verifying targets failed, not enough signatures, got 0, want 1", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT, metadata.TIMESTAMP, metadata.SNAPSHOT}) + }, + }, + { + name: "new targets expired fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + repo.SetExpired(metadata.TARGETS) + repo.UpdateSnapshot() + }, + wantErr: true, + wantErrType: &metadata.ErrExpiredMetadata{}, + wantErrMsg: "new targets is expired", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT, metadata.TIMESTAMP, metadata.SNAPSHOT}) + }, + }, + { + name: "new targets version mismatch fails", + setup: func(t *testing.T, repo *simulator.TestRepository) { + // Increase targets version without updating snapshot + repo.Simulator.MDTargets.Signed.Version++ + }, + wantErr: true, + wantErrType: &metadata.ErrBadVersionNumber{}, + wantErrMsg: "expected targets version 1, got 2", + assert: func(t *testing.T, repo *simulator.TestRepository) { + repo.AssertFilesExist([]string{metadata.ROOT, metadata.TIMESTAMP, metadata.SNAPSHOT}) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + if tc.setup != nil { + tc.setup(t, repo) + } + + _, err := createAndRefresh(t, repo) + + if tc.wantErr { + 
assert.Error(t, err) + if tc.wantErrType != nil { + assert.ErrorIs(t, err, tc.wantErrType) + } + if tc.wantErrMsg != "" { + assert.ErrorContains(t, err, tc.wantErrMsg) + } + } else { + assert.NoError(t, err) + } + + if tc.assert != nil { + tc.assert(t, repo) + } + }) + } +} + +// TestTimestampFastForwardRecovery tests timestamp fast-forward attack recovery +func TestTimestampFastForwardRecovery(t *testing.T) { + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + // Attacker updates timestamp to a higher version + repo.Simulator.MDTimestamp.Signed.Version = 99999 + + // Client refreshes and sees new version + _, err := createAndRefresh(t, repo) + assert.NoError(t, err) + repo.AssertVersionEquals(metadata.TIMESTAMP, 99999) + + // Repository rotates timestamp keys + repo.RotateKeys(metadata.TIMESTAMP) + repo.BumpVersion(metadata.ROOT) + + // Roll back timestamp version + repo.Simulator.MDTimestamp.Signed.Version = 1 + + // Client refreshes and sees initial version + _, err = createAndRefresh(t, repo) + assert.NoError(t, err) + repo.AssertVersionEquals(metadata.TIMESTAMP, 1) +} + +// TestSnapshotFastForwardRecovery tests snapshot fast-forward attack recovery +func TestSnapshotFastForwardRecovery(t *testing.T) { + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + // Attacker updates snapshot to a higher version (bumping timestamp is required) + repo.Simulator.MDSnapshot.Signed.Version = 99999 + repo.UpdateTimestamp() + + // Client refreshes and sees new version + _, err := createAndRefresh(t, repo) + assert.NoError(t, err) + repo.AssertVersionEquals(metadata.SNAPSHOT, 99999) + + // Repository rotates snapshot & timestamp keys + repo.RotateKeys(metadata.SNAPSHOT) + repo.RotateKeys(metadata.TIMESTAMP) + repo.BumpVersion(metadata.ROOT) + + // Roll back snapshot version + repo.Simulator.MDSnapshot.Signed.Version = 1 + repo.UpdateTimestamp() + + // Client refreshes and sees initial version + _, err = createAndRefresh(t, repo) + 
assert.NoError(t, err) + repo.AssertVersionEquals(metadata.SNAPSHOT, 1) +} + +// TestTargetsFastForwardRecovery tests targets fast-forward attack recovery +func TestTargetsFastForwardRecovery(t *testing.T) { + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + // Attacker updates targets to a higher version + repo.Simulator.MDTargets.Signed.Version = 99999 + repo.UpdateSnapshot() + + // Client refreshes and sees new version + _, err := createAndRefresh(t, repo) + assert.NoError(t, err) + repo.AssertVersionEquals(metadata.TARGETS, 99999) + + // Repository rotates snapshot keys (which resets targets tracking) + repo.RotateKeys(metadata.SNAPSHOT) + repo.BumpVersion(metadata.ROOT) + + // Roll back targets version + repo.Simulator.MDTargets.Signed.Version = 1 + repo.UpdateSnapshot() + + // Client refreshes and sees initial version + _, err = createAndRefresh(t, repo) + assert.NoError(t, err) + repo.AssertVersionEquals(metadata.TARGETS, 1) +} + +// TestVersionRollbackTable tests version rollback attack detection +func TestVersionRollbackTable(t *testing.T) { + tests := []struct { + name string + role string + }{ + { + name: "timestamp version rollback fails", + role: metadata.TIMESTAMP, + }, + { + name: "snapshot version rollback fails", + role: metadata.SNAPSHOT, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + // Set initial higher version + repo.SetVersion(tc.role, 2) + repo.UpdateSnapshot() + + // First refresh succeeds + _, err := createAndRefresh(t, repo) + assert.NoError(t, err) + + // Now try to rollback + repo.SetVersion(tc.role, 1) + repo.UpdateSnapshot() + + // Second refresh should fail with rollback detection + _, err = createAndRefresh(t, repo) + assert.Error(t, err) + assert.ErrorIs(t, err, &metadata.ErrBadVersionNumber{}) + // Error message will contain "must be >= X" where X is the previously seen version + assert.ErrorContains(t, err, "must be 
>=") + }) + } +} + +// TestConsistentSnapshotTable tests consistent snapshot behavior +func TestConsistentSnapshotTable(t *testing.T) { + tests := []struct { + name string + consistentSnapshot bool + expectedMetadata []simulator.FTMetadata + }{ + { + name: "consistent snapshot disabled", + consistentSnapshot: false, + expectedMetadata: []simulator.FTMetadata{ + {Name: "root", Value: 2}, + {Name: "root", Value: 3}, + {Name: "timestamp", Value: -1}, + {Name: "snapshot", Value: -1}, + {Name: "targets", Value: -1}, + }, + }, + { + name: "consistent snapshot enabled", + consistentSnapshot: true, + expectedMetadata: []simulator.FTMetadata{ + {Name: "root", Value: 2}, + {Name: "root", Value: 3}, + {Name: "timestamp", Value: -1}, + {Name: "snapshot", Value: 1}, + {Name: "targets", Value: 1}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + repo.Simulator.MDRoot.Signed.ConsistentSnapshot = tc.consistentSnapshot + repo.BumpVersion(metadata.ROOT) + + // Get updater config and create updater manually to track fetch + cfg, err := repo.GetUpdaterConfig() + assert.NoError(t, err) + + updater, err := New(cfg) + assert.NoError(t, err) + + // Clear fetch tracker + repo.Simulator.FetchTracker.Metadata = []simulator.FTMetadata{} + + err = updater.Refresh() + assert.NoError(t, err) + + // Verify metadata was fetched with expected versions + assert.EqualValues(t, tc.expectedMetadata, repo.Simulator.FetchTracker.Metadata) + repo.AssertFilesExist(metadata.TOP_LEVEL_ROLE_NAMES[:]) + }) + } +} + +// TestHashMismatchTable tests hash mismatch detection scenarios +func TestHashMismatchTable(t *testing.T) { + tests := []struct { + name string + role string + wantErrType error + wantErrMsg string + }{ + { + name: "snapshot hash mismatch", + role: metadata.SNAPSHOT, + wantErrType: &metadata.ErrLengthOrHashMismatch{}, + wantErrMsg: "hash verification failed - mismatch for algorithm sha256", + }, + 
{ + name: "targets hash mismatch", + role: metadata.TARGETS, + wantErrType: &metadata.ErrLengthOrHashMismatch{}, + wantErrMsg: "hash verification failed - mismatch for algorithm sha256", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + repo := simulator.NewTestRepository(t) + defer repo.Cleanup() + + // Enable hash computation and do initial update + repo.EnableComputeHashesAndLength() + repo.UpdateSnapshot() + + // First refresh succeeds + _, err := createAndRefresh(t, repo) + assert.NoError(t, err) + + // Modify metadata without updating hashes + switch tc.role { + case metadata.SNAPSHOT: + repo.Simulator.MDSnapshot.Signed.Expires = repo.Simulator.MDSnapshot.Signed.Expires.Add(time.Hour * 24) + repo.Simulator.MDSnapshot.Signed.Version++ + repo.Simulator.MDTimestamp.Signed.Meta["snapshot.json"].Version = repo.Simulator.MDSnapshot.Signed.Version + repo.Simulator.MDTimestamp.Signed.Version++ + case metadata.TARGETS: + repo.Simulator.MDTargets.Signed.Version++ + repo.Simulator.MDSnapshot.Signed.Meta["targets.json"].Version = repo.Simulator.MDTargets.Signed.Version + repo.Simulator.MDSnapshot.Signed.Version++ + repo.UpdateTimestamp() + } + + // Disable hash computation so hashes don't match + repo.DisableComputeHashesAndLength() + + // Second refresh should fail with hash mismatch + _, err = createAndRefresh(t, repo) + assert.Error(t, err) + assert.ErrorIs(t, err, tc.wantErrType) + assert.ErrorContains(t, err, tc.wantErrMsg) + }) + } +}