diff --git a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
new file mode 100644
index 00000000000..0206cfe1248
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
@@ -0,0 +1,1311 @@
+# CERTIFICATE-TRANSPARENCY-GO Changelog
+
+## HEAD
+
+## v1.3.2
+
+### Misc
+
+* [migrillian] remove etcd support in #1699
+* Bump golangci-lint from 1.55.1 to 1.61.0 (developers should update to this version).
+* Update ctclient tool to support SCT extensions field by @liweitianux in https://github.com/google/certificate-transparency-go/pull/1645
+* Bump go to 1.23
+* [ct_hammer] support HTTPS and Bearer token authentication.
+* [preloader] support Bearer token authentication for non-temporal logs.
+* [preloader] support end indexes
+* [CTFE] Short cache max-age when get-entries returns fewer entries than requested by @robstradling in https://github.com/google/certificate-transparency-go/pull/1707 (see the sketch after this list)
+* [CTFE] Disallow mismatching signature algorithm identifiers in #702.
+* [jsonclient] surface HTTP Do and Read errors #1695 by @FiloSottile
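+
+To illustrate the shorter cache max-age behaviour, here is a minimal, hypothetical Go sketch (handler wiring and constants are assumptions for illustration, not the actual CTFE code): when get-entries returns fewer entries than requested, a short `max-age` encourages clients to re-fetch soon.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+)
+
+// Assumed values for illustration; the real CTFE derives these from its config.
+const (
+	fullCacheMaxAge  = 86400 // seconds, for complete get-entries responses
+	shortCacheMaxAge = 60    // seconds, when fewer entries than requested are returned
+)
+
+// setCacheControl picks a short max-age when the log returned fewer entries
+// than the client asked for, so clients soon retry for the remainder.
+func setCacheControl(w http.ResponseWriter, requested, returned int) {
+	maxAge := fullCacheMaxAge
+	if returned < requested {
+		maxAge = shortCacheMaxAge
+	}
+	w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", maxAge))
+}
+
+func main() {
+	w := httptest.NewRecorder()
+	setCacheControl(w, 256, 100) // partial response
+	fmt.Println(w.Header().Get("Cache-Control")) // public, max-age=60
+}
+```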
+
+### CTFE Storage Saving: Extra Data Issuance Chain Deduplication
+
+* Suppress unnecessary duplicate key errors in the IssuanceChainStorage PostgreSQL implementation by @robstradling in https://github.com/google/certificate-transparency-go/pull/1678
+* Only store IssuanceChain if not cached by @robstradling in https://github.com/google/certificate-transparency-go/pull/1679
+
+### CTFE Rate Limiting Of Non-Fresh Submissions
+
+To protect a log from being flooded with requests for "old" certificates, optional rate limiting for "non-fresh submissions" can be configured by providing the following flags:
+
+- `non_fresh_submission_age`
+- `non_fresh_submission_burst`
+- `non_fresh_submission_limit`
+
+This can help ensure that the log maintains its ability to (1) accept "fresh" submissions and (2) distribute all log entries to monitors. A sketch of the mechanism follows below.
+
+* [CTFE] Configurable mechanism to rate-limit non-fresh submissions by @robstradling in https://github.com/google/certificate-transparency-go/pull/1698
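+
+A minimal sketch of how such a limiter can behave, using `golang.org/x/time/rate` (the flag names mirror the ones above; the surrounding wiring is an assumption for illustration, not the actual CTFE code):
+
+```go
+package main
+
+import (
+	"flag"
+	"fmt"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+var (
+	nonFreshAge   = flag.Duration("non_fresh_submission_age", 24*time.Hour, "age beyond which a submission counts as non-fresh")
+	nonFreshLimit = flag.Float64("non_fresh_submission_limit", 1, "sustained non-fresh submissions per second")
+	nonFreshBurst = flag.Int("non_fresh_submission_burst", 5, "burst of non-fresh submissions allowed")
+)
+
+func main() {
+	flag.Parse()
+	limiter := rate.NewLimiter(rate.Limit(*nonFreshLimit), *nonFreshBurst)
+
+	// admit rate-limits only submissions whose certificate NotBefore is
+	// older than the configured age; fresh submissions always pass.
+	admit := func(notBefore time.Time) bool {
+		if time.Since(notBefore) <= *nonFreshAge {
+			return true
+		}
+		return limiter.Allow()
+	}
+
+	fmt.Println(admit(time.Now()))                      // fresh: true
+	fmt.Println(admit(time.Now().Add(-48 * time.Hour))) // non-fresh: limiter decides
+}
+```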
+
+### Dependency update
+
+* Bump the docker-deps group across 5 directories with 3 updates (#1705)
+* Bump google.golang.org/grpc from 1.72.1 to 1.72.2 in the all-deps group (#1704)
+* Bump github.com/go-jose/go-jose/v4 in the go_modules group (#1700)
+* Bump the all-deps group with 7 updates (#1701)
+* Bump the all-deps group with 7 updates (#1693)
+* Bump the docker-deps group across 4 directories with 1 update (#1694)
+* Bump github/codeql-action from 3.28.13 to 3.28.16 in the all-deps group (#1692)
+* Bump the all-deps group across 1 directory with 7 updates (#1688)
+* Bump distroless/base-debian12 (#1686)
+* Bump golangci/golangci-lint-action from 6.5.1 to 7.0.0 in the all-deps group (#1685)
+* Bump the all-deps group with 4 updates (#1681)
+* Bump the all-deps group with 6 updates (#1683)
+* Bump the docker-deps group across 4 directories with 2 updates (#1682)
+* Bump github.com/golang-jwt/jwt/v4 in the go_modules group (#1680)
+* Bump golangci/golangci-lint-action in the all-deps group (#1676)
+* Bump the all-deps group with 2 updates (#1677)
+* Bump github/codeql-action from 3.28.10 to 3.28.11 in the all-deps group (#1670)
+* Bump the all-deps group with 8 updates (#1672)
+* Bump the docker-deps group across 4 directories with 1 update (#1671)
+* Bump the docker-deps group across 4 directories with 1 update (#1668)
+* Bump the all-deps group with 4 updates (#1666)
+* Bump golangci-lint from 1.55.1 to 1.61.0 (#1667)
+* Bump the all-deps group with 3 updates (#1665)
+* Bump github.com/spf13/cobra from 1.8.1 to 1.9.1 in the all-deps group (#1660)
+* Bump the docker-deps group across 5 directories with 2 updates (#1661)
+* Bump golangci/golangci-lint-action in the all-deps group (#1662)
+* Bump the docker-deps group across 4 directories with 1 update (#1656)
+* Bump the all-deps group with 2 updates (#1654)
+* Bump the all-deps group with 4 updates (#1657)
+* Bump github/codeql-action from 3.28.5 to 3.28.8 in the all-deps group (#1652)
+* Bump github.com/spf13/pflag from 1.0.5 to 1.0.6 in the all-deps group (#1651)
+* Bump the all-deps group with 2 updates (#1649)
+* Bump the all-deps group with 5 updates (#1650)
+* Bump the docker-deps group across 5 directories with 3 updates (#1648)
+* Bump google.golang.org/protobuf in the all-deps group (#1647)
+* Bump golangci/golangci-lint-action in the all-deps group (#1646)
+
+## v1.3.1
+
+* Add AllLogListSignatureURL by @AlexLaroche in https://github.com/google/certificate-transparency-go/pull/1634
+* Add TiledLogs to log list JSON by @mcpherrinm in https://github.com/google/certificate-transparency-go/pull/1635
+* chore: relax go directive to permit 1.22.x by @dnwe in https://github.com/google/certificate-transparency-go/pull/1640
+
+### Dependency update
+
+* Bump github.com/fullstorydev/grpcurl from 1.9.1 to 1.9.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1627
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1628
+* Bump the docker-deps group across 5 directories with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1630
+* Bump github/codeql-action from 3.27.5 to 3.27.6 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1629
+* Bump golang.org/x/crypto from 0.30.0 to 0.31.0 in the go_modules group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1631
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1633
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1632
+* Bump the docker-deps group across 4 directories with 1 update by @dependabot in https://github.com/google/certificate-transparency-go/pull/1638
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1637
+* Bump the all-deps group across 1 directory with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1641
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1643
+* Bump google.golang.org/grpc from 1.69.2 to 1.69.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1642
+
+## v1.3.0
+
+### CTFE Storage Saving: Extra Data Issuance Chain Deduplication
+
+This feature now supports PostgreSQL, in addition to the MySQL/MariaDB support added in [v1.2.0](#v120).
+
+Log operators can choose to enable this feature for new PostgreSQL-based CT logs by adding new CTFE configs in the [LogMultiConfig](trillian/ctfe/configpb/config.proto) and importing the [database schema](trillian/ctfe/storage/postgresql/schema.sql). The other available options are documented in the [v1.2.0](#v120) changelog entry.
+
+As of the time of writing, this change is covered by Cloud Build tests that use the `postgres:17` Docker image.
+
+* Add IssuanceChainStorage PostgreSQL implementation by @robstradling in https://github.com/google/certificate-transparency-go/pull/1618
+
+### Misc
+
+* [Dependabot] Update all docker images in one PR by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1614
+* Explicitly include version tag by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1617
+* Add empty cloudbuild_postgresql.yaml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1623
+
+### Dependency update
+
+* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1609
+* Bump golang from 1.23.2-bookworm to 1.23.3-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1611
+* Bump github/codeql-action from 3.27.0 to 3.27.1 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1610
+* Bump golang from 1.23.2-bookworm to 1.23.3-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1612
+* Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 in the go_modules group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1613
+* Bump the docker-deps group across 3 directories with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1616
+* Bump github/codeql-action from 3.27.1 to 3.27.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1615
+* Bump the docker-deps group across 4 directories with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1622
+* Bump github/codeql-action from 3.27.2 to 3.27.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1620
+* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1621
+* Bump github.com/google/trillian from 1.6.1 to 1.7.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1624
+* Bump github/codeql-action from 3.27.4 to 3.27.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1625
+
+## v1.2.2
+
+* Recommended Go version for development: 1.22
+ * Using a different version can lead to presubmits failing due to unexpected diffs.
+
+### Add TLS Support
+
+Add TLS support for Trillian: by using the `--trillian_tls_ca_cert_file` flag, users can provide a CA certificate that is used to establish secure communication with the Trillian log server.
+
+Add TLS support for ct_server: by using the `--tls_certificate` and `--tls_key` flags, users can provide a service certificate and key, enabling the server to handle HTTPS requests.
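+
+A minimal sketch of both halves under stated assumptions (file names, addresses, and the gRPC wiring are hypothetical; only the flag names come from the entries above):
+
+```go
+package main
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"log"
+	"net/http"
+	"os"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+)
+
+// Hypothetical values standing in for the flags described above.
+const (
+	trillianCACertFile = "trillian-ca.pem" // --trillian_tls_ca_cert_file
+	tlsCertFile        = "server.pem"      // --tls_certificate
+	tlsKeyFile         = "server-key.pem"  // --tls_key
+)
+
+func main() {
+	// Client side: trust the provided CA when dialing the Trillian log server.
+	caPEM, err := os.ReadFile(trillianCACertFile)
+	if err != nil {
+		log.Fatal(err)
+	}
+	pool := x509.NewCertPool()
+	if !pool.AppendCertsFromPEM(caPEM) {
+		log.Fatal("failed to parse Trillian CA certificate")
+	}
+	conn, err := grpc.NewClient("trillian-log-server:8090",
+		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{RootCAs: pool})))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+
+	// Server side: serve the CT HTTP API over HTTPS with the given cert and key.
+	log.Fatal(http.ListenAndServeTLS(":6962", tlsCertFile, tlsKeyFile, nil))
+}
+```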
+
+* Add TLS support for CTLog server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1523
+* Add TLS support for migrillian by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1525
+* fix TLS configuration for ct_server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1542
+* Add Trillian TLS support for ct_server by @fghanmi in https://github.com/google/certificate-transparency-go/pull/1551
+
+### HTTP Idle Connection Timeout Flag
+
+A new flag `http_idle_timeout` is added to set the HTTP server's idle timeout value in the ct_server binary. This controls the maximum amount of time to wait for the next request when keep-alives are enabled.
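+
+In standard-library terms, the flag maps onto `http.Server.IdleTimeout`; a minimal, hypothetical sketch (the actual ct_server wiring differs):
+
+```go
+package main
+
+import (
+	"flag"
+	"log"
+	"net/http"
+	"time"
+)
+
+var httpIdleTimeout = flag.Duration("http_idle_timeout", 90*time.Second, "idle timeout for keep-alive connections")
+
+func main() {
+	flag.Parse()
+	srv := &http.Server{
+		Addr:        ":6962",
+		IdleTimeout: *httpIdleTimeout, // max wait for the next request on a kept-alive connection
+	}
+	log.Fatal(srv.ListenAndServe())
+}
+```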
+
+* add flag for HTTP idle connection timeout value by @bobcallaway in https://github.com/google/certificate-transparency-go/pull/1597
+
+### Misc
+
+* Refactor issuance chain service by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1512
+* Use the version in the go.mod file for vuln checks by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1528
+
+### Fixes
+
+* Fix failed tests on 32-bit OS by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1540
+
+### Dependency update
+
+* Bump go.etcd.io/etcd/v3 from 3.5.13 to 3.5.14 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1500
+* Bump github/codeql-action from 3.25.6 to 3.25.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1501
+* Bump golang.org/x/net from 0.25.0 to 0.26.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1503
+* Group dependabot updates as much as possible by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1506
+* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1507
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1511
+* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1510
+* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1509
+* Bump golang from 1.22.3-bookworm to 1.22.4-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1508
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1516
+* Bump golang from `aec4784` to `9678844` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1518
+* Bump alpine from 3.19 to 3.20 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1492
+* Bump golang from `aec4784` to `9678844` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1517
+* Bump golang from `aec4784` to `9678844` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1513
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1515
+* Bump golang from `aec4784` to `9678844` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1514
+* Bump alpine from `77726ef` to `b89d9c9` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1519
+* Bump k8s.io/klog/v2 from 2.130.0 to 2.130.1 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1521
+* Bump alpine from `77726ef` to `b89d9c9` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1520
+* Bump github/codeql-action from 3.25.10 to 3.25.11 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1526
+* Bump version of go used by the vuln checker by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1527
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1530
+* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1531
+* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1532
+* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1533
+* Bump actions/upload-artifact from 4.3.3 to 4.3.4 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1534
+* Bump golang from 1.22.4-bookworm to 1.22.5-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1535
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1536
+* Bump github/codeql-action from 3.25.12 to 3.25.13 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1538
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1537
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1543
+* Bump golang from `6c27802` to `af9b40f` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1544
+* Bump golang from `6c27802` to `af9b40f` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1548
+* Bump golang from `6c27802` to `af9b40f` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1547
+* Bump alpine from `b89d9c9` to `0a4eaa0` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1546
+* Bump the all-deps group in /internal/witness/cmd/feeder with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1545
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1549
+* Bump golang.org/x/time from 0.5.0 to 0.6.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1550
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1552
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1553
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1554
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1555
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1556
+* Bump golang from 1.22.5-bookworm to 1.22.6-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1557
+* Bump github.com/prometheus/client_golang from 1.19.1 to 1.20.0 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1559
+* Bump github/codeql-action from 3.26.0 to 3.26.3 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1561
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1558
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1563
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1560
+* Bump golang from 1.22.6-bookworm to 1.23.0-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1562
+* Bump go version to 1.22.6 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1564
+* Bump github.com/prometheus/client_golang from 1.20.0 to 1.20.2 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1565
+* Bump github/codeql-action from 3.26.3 to 3.26.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1566
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1568
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1569
+* Bump go from 1.22.6 to 1.22.7 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1574
+* Bump alpine from `0a4eaa0` to `beefdbd` in /trillian/examples/deployment/docker/envsubst in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1571
+* Bump the all-deps group across 1 directory with 5 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1577
+* Bump golang from 1.23.0-bookworm to 1.23.1-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1575
+* Bump golang from 1.23.0-bookworm to 1.23.1-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1576
+* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1572
+* Bump the all-deps group in /internal/witness/cmd/feeder with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1573
+* Bump the all-deps group with 4 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1578
+* Bump github/codeql-action from 3.26.6 to 3.26.7 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1579
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1580
+* Bump github/codeql-action from 3.26.7 to 3.26.8 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1581
+* Bump distroless/base-debian12 from `c925d12` to `88e0a2a` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1582
+* Bump the all-deps group in /trillian/examples/deployment/docker/ctfe with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1585
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1583
+* Bump golang from `1a5326b` to `dba79eb` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1584
+* Bump golang from `1a5326b` to `dba79eb` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1587
+* Bump golang from `1a5326b` to `dba79eb` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1586
+* Bump the all-deps group with 5 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1588
+* Bump the all-deps group with 6 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1589
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1593
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1592
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1591
+* Bump golang from 1.23.1-bookworm to 1.23.2-bookworm in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1590
+* Bump the all-deps group with 2 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1595
+* Bump github.com/prometheus/client_golang from 1.20.4 to 1.20.5 in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1598
+* Bump golang from `18d2f94` to `2341ddf` in /integration in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1602
+* Bump golang from `18d2f94` to `2341ddf` in /internal/witness/cmd/witness in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1599
+* Bump golang from `18d2f94` to `2341ddf` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1600
+* Bump golang from `18d2f94` to `2341ddf` in /internal/witness/cmd/feeder in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1601
+* Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1603
+* Bump distroless/base-debian12 from `6ae5fe6` to `8fe31fb` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1604
+
+## v1.2.1
+
+### Fixes
+
+* Fix Go potential bugs and maintainability by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1496
+
+### Dependency update
+
+* Bump google.golang.org/grpc from 1.63.2 to 1.64.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1482
+
+## v1.2.0
+
+### CTFE Storage Saving: Extra Data Issuance Chain Deduplication
+
+This feature reduces CT/Trillian database storage by deduplicating the issuance chain (intermediate certificate(s) and root certificate) that is currently stored in the Trillian Merkle tree leaf ExtraData field. Storage cost should be reduced by at least 33% for new CT logs with this feature enabled. Currently, only MySQL/MariaDB is supported for storing the issuance chain in the CTFE database.
+
+Existing logs are not affected by this change.
+
+Log operators can choose to opt in to this change for new CT logs by adding the following new CTFE configs in the [LogMultiConfig](trillian/ctfe/configpb/config.proto) and importing the [database schema](trillian/ctfe/storage/mysql/schema.sql). See [example](trillian/examples/deployment/docker/ctfe/ct_server.cfg).
+
+- `ctfe_storage_connection_string`
+- `extra_data_issuance_chain_storage_backend`
+
+An optional LRU cache can be enabled by providing the following flags:
+
+- `cache_type`
+- `cache_size`
+- `cache_ttl`
+
+As of the time of writing, this change is covered by Cloud Build tests that use the `mysql:8.4` Docker image.
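+
+The core deduplication idea can be sketched as follows (names and structure are illustrative assumptions, not the CTFE implementation): the Merkle leaf's ExtraData keeps only a hash of the issuance chain, and the chain bytes are stored once, keyed by that hash.
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"sync"
+)
+
+// chainStore stores each distinct issuance chain once, keyed by its hash.
+type chainStore struct {
+	mu     sync.Mutex
+	chains map[[sha256.Size]byte][]byte
+}
+
+func newChainStore() *chainStore {
+	return &chainStore{chains: make(map[[sha256.Size]byte][]byte)}
+}
+
+// put stores the chain if absent and returns the hash that the leaf
+// references in place of the full chain bytes.
+func (s *chainStore) put(chain []byte) [sha256.Size]byte {
+	h := sha256.Sum256(chain)
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if _, ok := s.chains[h]; !ok {
+		s.chains[h] = chain
+	}
+	return h
+}
+
+func main() {
+	s := newChainStore()
+	chain := []byte("intermediate+root DER bytes")
+	h1 := s.put(chain)
+	h2 := s.put(chain) // same chain submitted again: stored only once
+	fmt.Println(h1 == h2, len(s.chains)) // true 1
+}
+```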
+
+* Add issuance chain storage interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1430
+* Add issuance chain cache interface by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1431
+* Add CTFE extra data storage saving configs to config.proto by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1432
+* Add new types `PrecertChainEntryHash` and `CertificateChainHash` for TLS marshal/unmarshal in storage saving by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1435
+* Add IssuanceChainCache LRU implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1454
+* Add issuance chain service by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1452
+* Add CTFE extra data storage saving configs validation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1456
+* Add IssuanceChainStorage MySQL implementation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1462
+* Fix errcheck lint in mysql test by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1464
+* CTFE Extra Data Issuance Chain Deduplication by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1477
+* Fix incorrect deployment doc and server config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1494
+
+### Submission proxy: Root compatibility checking
+
+* Adds the ability for a CT client to disable root compatibility checking by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1258
+
+### Fixes
+
+* Return 429 Too Many Requests for gRPC error code `ResourceExhausted` from Trillian by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1401
+* Safeguard against redirects on PUT request by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1418
+* Fix CT client upload to be safe against no-op POSTs by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1424
+
+### Misc
+
+* Prefix errors.New variables with the word "Err" by @aaomidi in https://github.com/google/certificate-transparency-go/pull/1399
+* Remove lint exceptions and fix remaining issues by @silaselisha in https://github.com/google/certificate-transparency-go/pull/1438
+* Fix invalid Go toolchain version by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1471
+* Regenerate proto files by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1489
+
+### Dependency update
+
+* Bump distroless/base-debian12 from `5eae9ef` to `28a7f1f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1388
+* Bump github/codeql-action from 3.24.6 to 3.24.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1389
+* Bump actions/checkout from 4.1.1 to 4.1.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1390
+* Bump golang from `6699d28` to `7f9c058` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1391
+* Bump golang from `6699d28` to `7f9c058` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1392
+* Bump golang from `6699d28` to `7a392a2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1393
+* Bump golang from `6699d28` to `7a392a2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1394
+* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1395
+* Bump golang from `7f9c058` to `d996c64` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1396
+* Bump golang from `7a392a2` to `d996c64` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1397
+* Bump golang from `7f9c058` to `d996c64` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1398
+* Bump github/codeql-action from 3.24.7 to 3.24.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1400
+* Bump github/codeql-action from 3.24.8 to 3.24.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1402
+* Bump go.etcd.io/etcd/v3 from 3.5.12 to 3.5.13 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1405
+* Bump distroless/base-debian12 from `28a7f1f` to `611d30d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1406
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1407
+* Bump golang.org/x/net from 0.22.0 to 0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1408
+* update govulncheck go version from 1.21.8 to 1.21.9 by @phbnf in https://github.com/google/certificate-transparency-go/pull/1412
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1409
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1410
+* Bump golang.org/x/crypto from 0.21.0 to 0.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1414
+* Bump golang from 1.22.1-bookworm to 1.22.2-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1411
+* Bump github/codeql-action from 3.24.9 to 3.24.10 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1415
+* Bump golang.org/x/net from 0.23.0 to 0.24.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1416
+* Bump google.golang.org/grpc from 1.62.1 to 1.63.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1417
+* Bump github.com/fullstorydev/grpcurl from 1.8.9 to 1.9.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1419
+* Bump golang from `48b942a` to `3451eec` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1421
+* Bump golang from `48b942a` to `3451eec` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1423
+* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1420
+* Bump golang from `3451eec` to `b03f3ba` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1426
+* Bump golang from `3451eec` to `b03f3ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1425
+* Bump golang from `48b942a` to `3451eec` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1422
+* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1427
+* Bump golang from `3451eec` to `b03f3ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1428
+* Bump github/codeql-action from 3.24.10 to 3.25.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1433
+* Bump github/codeql-action from 3.25.0 to 3.25.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1434
+* Bump actions/upload-artifact from 4.3.1 to 4.3.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1436
+* Bump actions/checkout from 4.1.2 to 4.1.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1437
+* Bump actions/upload-artifact from 4.3.2 to 4.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1440
+* Bump github/codeql-action from 3.25.1 to 3.25.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1441
+* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1444
+* Bump golang from `b03f3ba` to `d0902ba` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1443
+* Bump github.com/rs/cors from 1.10.1 to 1.11.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1442
+* Bump golang from `b03f3ba` to `d0902ba` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1447
+* Bump actions/checkout from 4.1.3 to 4.1.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1446
+* Bump github/codeql-action from 3.25.2 to 3.25.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1449
+* Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1448
+* Bump golang from `b03f3ba` to `d0902ba` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1445
+* Bump golangci/golangci-lint-action from 5.0.0 to 5.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1451
+* Bump distroless/base-debian12 from `611d30d` to `d8d01e2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1450
+* Bump google.golang.org/protobuf from 1.33.1-0.20240408130810-98873a205002 to 1.34.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1453
+* Bump actions/setup-go from 5.0.0 to 5.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1455
+* Bump golang.org/x/net from 0.24.0 to 0.25.0 and golang.org/x/crypto from v0.22.0 to v0.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1457
+* Bump google.golang.org/protobuf from 1.34.0 to 1.34.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1458
+* Bump distroless/base-debian12 from `d8d01e2` to `786007f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1461
+* Bump golangci/golangci-lint-action from 5.1.0 to 5.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1460
+* Bump `go-version-input` to 1.21.10 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1472
+* Bump golangci/golangci-lint-action from 5.3.0 to 6.0.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1473
+* Bump actions/checkout from 4.1.4 to 4.1.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1469
+* Bump github.com/go-sql-driver/mysql from 1.7.1 to 1.8.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1465
+* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1466
+* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1463
+* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1470
+* Bump golang from 1.22.2-bookworm to 1.22.3-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1467
+* Bump github/codeql-action from 3.25.3 to 3.25.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1474
+* Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1475
+* Bump ossf/scorecard-action from 2.3.1 to 2.3.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1476
+* Bump github/codeql-action from 3.25.4 to 3.25.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1478
+* Bump golang from `6d71b7c` to `ef27a3c` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1480
+* Bump golang from `6d71b7c` to `ef27a3c` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1481
+* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1479
+* Bump golang from `6d71b7c` to `ef27a3c` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1483
+* Bump golang from `ef27a3c` to `5c56bd4` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1484
+* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1485
+* Bump golang from `ef27a3c` to `5c56bd4` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1486
+* Bump actions/checkout from 4.1.5 to 4.1.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1487
+* Bump golang from `ef27a3c` to `5c56bd4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1488
+* Bump github/codeql-action from 3.25.5 to 3.25.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1490
+* Bump alpine from `c5b1261` to `58d02b4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1491
+* Bump alpine from `58d02b4` to `77726ef` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1493
+
+## v1.1.8
+
+* Recommended Go version for development: 1.21
+ * Using a different version can lead to presubmits failing due to unexpected diffs.
+
+### Add support for AIX
+
+* crypto/x509: add AIX operating system by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1277
+
+### Monitoring
+
+* Distribution metric to monitor the start of get-entries requests by @phbnf in https://github.com/google/certificate-transparency-go/pull/1364
+
+### Fixes
+
+* Use the appropriate HTTP response code for backend timeouts by @robstradling in https://github.com/google/certificate-transparency-go/pull/1313
+
+### Misc
+
+* Move golangci-lint from Cloud Build to GitHub Action by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1230
+* Set golangci-lint GH action timeout to 5m by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1231
+* Added Slack channel details by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1246
+* Improve fuzzing by @AdamKorcz in https://github.com/google/certificate-transparency-go/pull/1345
+
+### Dependency update
+
+* Bump golang from `20f9ab5` to `5ee1296` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1216
+* Bump golang from `20f9ab5` to `5ee1296` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1217
+* Bump golang from `20f9ab5` to `5ee1296` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1218
+* Bump k8s.io/klog/v2 from 2.100.1 to 2.110.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1219
+* Bump golang from `20f9ab5` to `5ee1296` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1220
+* Bump golang from `5ee1296` to `5bafbbb` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1221
+* Bump golang from `5ee1296` to `5bafbbb` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1222
+* Bump golang from `5ee1296` to `5bafbbb` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1223
+* Bump golang from `5ee1296` to `5bafbbb` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1224
+* Update the minimal image to gcr.io/distroless/base-debian12 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1148
+* Bump jq from 1.6 to 1.7 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1225
+* Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1226
+* Bump golang.org/x/time from 0.3.0 to 0.4.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1227
+* Bump github.com/mattn/go-sqlite3 from 1.14.17 to 1.14.18 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1228
+* Bump github.com/gorilla/mux from 1.8.0 to 1.8.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1229
+* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1232
+* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1233
+* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1234
+* Bump golang from 1.21.3-bookworm to 1.21.4-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1235
+* Bump go-version-input from 1.20.10 to 1.20.11 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1238
+* Bump golang.org/x/net from 0.17.0 to 0.18.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1236
+* Bump github/codeql-action from 2.22.5 to 2.22.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1240
+* Bump github/codeql-action from 2.22.6 to 2.22.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1241
+* Bump golang from `85aacbe` to `dadce81` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1243
+* Bump golang from `85aacbe` to `dadce81` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1242
+* Bump golang from `85aacbe` to `dadce81` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1244
+* Bump golang from `85aacbe` to `dadce81` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1245
+* Bump golang from `dadce81` to `52362e2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1247
+* Bump golang from `dadce81` to `52362e2` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1248
+* Bump golang from `dadce81` to `52362e2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1249
+* Bump golang from `dadce81` to `52362e2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1250
+* Bump github/codeql-action from 2.22.7 to 2.22.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1251
+* Bump golang.org/x/net from 0.18.0 to 0.19.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1252
+* Bump golang.org/x/time from 0.4.0 to 0.5.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1254
+* Bump alpine from `eece025` to `34871e7` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1256
+* Bump alpine from `eece025` to `34871e7` in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1257
+* Bump go-version-input from 1.20.11 to 1.20.12 in govulncheck.yml by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1264
+* Bump actions/setup-go from 4.1.0 to 5.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1261
+* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1259
+* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1263
+* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1262
+* Bump golang from 1.21.4-bookworm to 1.21.5-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1260
+* Bump go.etcd.io/etcd/v3 from 3.5.10 to 3.5.11 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1266
+* Bump github/codeql-action from 2.22.8 to 2.22.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1269
+* Bump alpine from `34871e7` to `51b6726` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1270
+* Bump alpine from 3.18 to 3.19 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1271
+* Bump golang from `a6b787c` to `2d3b13c` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1272
+* Bump golang from `a6b787c` to `2d3b13c` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1273
+* Bump golang from `a6b787c` to `2d3b13c` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1274
+* Bump golang from `a6b787c` to `2d3b13c` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1275
+* Bump github/codeql-action from 2.22.9 to 2.22.10 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1278
+* Bump google.golang.org/grpc from 1.59.0 to 1.60.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1279
+* Bump github/codeql-action from 2.22.10 to 3.22.11 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1280
+* Bump distroless/base-debian12 from `1dfdb5e` to `8a0bb63` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1281
+* Bump github.com/google/trillian from 1.5.3 to 1.5.4-0.20240110091238-00ca9abe023d by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1297
+* Bump actions/upload-artifact from 3.1.3 to 4.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1282
+* Bump github/codeql-action from 3.22.11 to 3.23.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1295
+* Bump github.com/mattn/go-sqlite3 from 1.14.18 to 1.14.19 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1283
+* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1300
+* Bump distroless/base-debian12 from `8a0bb63` to `0a93daa` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1284
+* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1299
+* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1298
+* Bump golang from 1.21.5-bookworm to 1.21.6-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1301
+* Bump golang from `688ad7f` to `1e8ea75` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1306
+* Bump golang from `688ad7f` to `1e8ea75` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1305
+* Use trillian release instead of pinned commit by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1304
+* Bump actions/upload-artifact from 4.0.0 to 4.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1310
+* Bump golang from `1e8ea75` to `cbee5d2` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1312
+* Bump golang from `688ad7f` to `cbee5d2` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1308
+* Bump golang from `1e8ea75` to `cbee5d2` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1311
+* Bump golang.org/x/net from 0.19.0 to 0.20.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1302
+* Bump golang from `b651ed8` to `cbee5d2` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1309
+* Bump golang from `cbee5d2` to `c4b696f` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1314
+* Bump golang from `cbee5d2` to `c4b696f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1315
+* Bump github/codeql-action from 3.23.0 to 3.23.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1317
+* Bump golang from `cbee5d2` to `c4b696f` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1316
+* Bump golang from `cbee5d2` to `c4b696f` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1318
+* Bump k8s.io/klog/v2 from 2.120.0 to 2.120.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1319
+* Bump actions/upload-artifact from 4.1.0 to 4.2.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1320
+* Bump actions/upload-artifact from 4.2.0 to 4.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1321
+* Bump golang from `c4b696f` to `d8c365d` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1326
+* Bump golang from `c4b696f` to `d8c365d` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1323
+* Bump google.golang.org/grpc from 1.60.1 to 1.61.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1324
+* Bump golang from `c4b696f` to `d8c365d` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1322
+* Bump golang from `c4b696f` to `d8c365d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1325
+* Bump github.com/mattn/go-sqlite3 from 1.14.19 to 1.14.20 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1327
+* Bump github/codeql-action from 3.23.1 to 3.23.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1328
+* Bump alpine from `51b6726` to `c5b1261` in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1330
+* Bump alpine from `51b6726` to `c5b1261` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1329
+* Bump go.etcd.io/etcd/v3 from 3.5.11 to 3.5.12 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1332
+* Bump github.com/mattn/go-sqlite3 from 1.14.20 to 1.14.21 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1333
+* Bump golang from `d8c365d` to `69bfed3` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1335
+* Bump golang from `d8c365d` to `69bfed3` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1338
+* Bump golang from `d8c365d` to `69bfed3` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1337
+* Bump golang from `d8c365d` to `69bfed3` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1336
+* Bump golang from `69bfed3` to `3efef61` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1339
+* Bump github.com/mattn/go-sqlite3 from 1.14.21 to 1.14.22 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1344
+* Bump golang from `69bfed3` to `3efef61` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1341
+* Bump golang from `69bfed3` to `3efef61` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1343
+* Bump distroless/base-debian12 from `0a93daa` to `f47fa3d` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1340
+* Bump golang from `69bfed3` to `3efef61` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1342
+* Bump github/codeql-action from 3.23.2 to 3.24.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1346
+* Bump actions/upload-artifact from 4.3.0 to 4.3.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1347
+* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1350
+* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1348
+* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1349
+* Bump golang from 1.21.6-bookworm to 1.22.0-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1351
+* Bump golang.org/x/crypto from 0.18.0 to 0.19.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1353
+* Bump golangci/golangci-lint-action from 3.7.0 to 4.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1354
+* Bump golang.org/x/net from 0.20.0 to 0.21.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1352
+* Bump distroless/base-debian12 from `f47fa3d` to `2102ce1` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1355
+* Bump github/codeql-action from 3.24.0 to 3.24.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1357
+* Bump golang from `874c267` to `5a3e169` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1356
+* Bump golang from `874c267` to `5a3e169` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1358
+* Bump golang from `874c267` to `5a3e169` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1359
+* Bump golang from `874c267` to `5a3e169` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1360
+* Bump github/codeql-action from 3.24.1 to 3.24.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1366
+* Bump golang from `5a3e169` to `925fe3f` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1363
+* Bump google.golang.org/grpc from 1.61.0 to 1.61.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1362
+* Bump golang from `5a3e169` to `925fe3f` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1365
+* Bump golang from `5a3e169` to `925fe3f` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1361
+* Bump golang from `5a3e169` to `925fe3f` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1367
+* Bump golang/govulncheck-action from 1.0.1 to 1.0.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1368
+* Bump github/codeql-action from 3.24.3 to 3.24.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1371
+* Bump google.golang.org/grpc from 1.61.1 to 1.62.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1369
+* Bump distroless/base-debian12 from `2102ce1` to `5eae9ef` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1372
+* Bump distroless/base-debian12 from `5eae9ef` to `f9b0e86` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1375
+* Bump golang.org/x/crypto from 0.19.0 to 0.20.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1374
+* Bump github.com/prometheus/client_golang from 1.18.0 to 1.19.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1373
+* Bump github/codeql-action from 3.24.5 to 3.24.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1377
+* Bump distroless/base-debian12 from `f9b0e86` to `5eae9ef` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1376
+* Bump golang.org/x/net from 0.21.0 to 0.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1378
+* Bump Go from 1.20 to 1.21 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1386
+* Bump google.golang.org/grpc from 1.62.0 to 1.62.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1380
+* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1382
+* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1385
+* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1384
+* Bump golang from 1.22.0-bookworm to 1.22.1-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1383
+
+## v1.1.7
+
+* Recommended Go version for development: 1.20
+ * This is the version used by the Cloud Build presubmits. Using a different version can lead to presubmits failing due to unexpected diffs.
+
+* Bump golangci-lint from 1.51.1 to 1.55.1 (developers should update to this version).
+
+### Add support for WASI port
+
+* Add build tags for wasip1 GOOS by @flavio in https://github.com/google/certificate-transparency-go/pull/1089
+
+### Add support for IBM Z operating system z/OS
+
+* Add build tags for zOS by @onlywork1984 in https://github.com/google/certificate-transparency-go/pull/1088
+
+### Log List
+
+* Add support for "is_all_logs" field in loglist3 by @phbnf in https://github.com/google/certificate-transparency-go/pull/1095
+
+### Documentation
+
+* Improve Dockerized Test Deployment documentation by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1179
+
+### Misc
+
+* Escape forward slashes in certificate Subject names when used as user quota id strings by @robstradling in https://github.com/google/certificate-transparency-go/pull/1059
+* Search whole chain looking for issuer match by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1112
+* Use proper check per @AGWA instead of buggy check introduced in #1112 by @mhutchinson in https://github.com/google/certificate-transparency-go/pull/1114
+* Build the ctfe/ct_server binary without depending on glibc by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1119
+* Migrate CTFE Ingress manifest to support GKE version 1.23 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1086
+* Remove Dependabot ignore configuration by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1097
+* Add "github-actions" and "docker" Dependabot config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1101
+* Add top level permission in CodeQL workflow by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1102
+* Pin Docker image dependencies by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1110
+* Remove GO111MODULE from Dockerfile and Cloud Build yaml files by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1113
+* Add docker Dependabot config by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1126
+* Export is_mirror = 0.0 for non-mirror logs instead of nothing by @phbnf in https://github.com/google/certificate-transparency-go/pull/1133
+* Add govulncheck GitHub action by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1145
+* Spelling by @jsoref in https://github.com/google/certificate-transparency-go/pull/1144
+
+### Dependency update
+
+* Bump Go from 1.19 to 1.20 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1146
+* Bump golangci-lint from 1.51.1 to 1.55.1 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1214
+* Bump go.etcd.io/etcd/v3 from 3.5.8 to 3.5.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1083
+* Bump golang.org/x/crypto from 0.8.0 to 0.9.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/108
+* Bump github.com/mattn/go-sqlite3 from 1.14.16 to 1.14.17 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1092
+* Bump golang.org/x/net from 0.10.0 to 0.11.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1094
+* Bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1098
+* Bump google.golang.org/protobuf from 1.30.0 to 1.31.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1099
+* Bump golang.org/x/net from 0.11.0 to 0.12.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1108
+* Bump actions/checkout from 3.1.0 to 3.5.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1103
+* Bump github/codeql-action from 2.1.27 to 2.20.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1104
+* Bump ossf/scorecard-action from 2.0.6 to 2.2.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1105
+* Bump actions/upload-artifact from 3.1.0 to 3.1.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1106
+* Bump github/codeql-action from 2.20.3 to 2.20.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1115
+* Bump github/codeql-action from 2.20.4 to 2.21.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1117
+* Bump golang.org/x/net from 0.12.0 to 0.14.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1124
+* Bump github/codeql-action from 2.21.0 to 2.21.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1121
+* Bump github/codeql-action from 2.21.2 to 2.21.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1125
+* Bump golang from `fd9306e` to `eb3f9ac` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1127
+* Bump alpine from 3.8 to 3.18 in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1129
+* Bump golang from `fd9306e` to `eb3f9ac` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1128
+* Bump alpine from `82d1e9d` to `7144f7b` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1130
+* Bump golang from `fd9306e` to `eb3f9ac` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1131
+* Bump golang from 1.19-alpine to 1.21-alpine in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1132
+* Bump actions/checkout from 3.5.3 to 3.6.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1134
+* Bump github/codeql-action from 2.21.4 to 2.21.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1135
+* Bump distroless/base from `73deaaf` to `46c5b9b` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1136
+* Bump actions/checkout from 3.6.0 to 4.0.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1137
+* Bump golang.org/x/net from 0.14.0 to 0.15.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1139
+* Bump github.com/rs/cors from 1.9.0 to 1.10.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1140
+* Bump actions/upload-artifact from 3.1.2 to 3.1.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1141
+* Bump golang from `445f340` to `96634e5` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1142
+* Bump github/codeql-action from 2.21.5 to 2.21.6 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1149
+* Bump Docker golang base images to 1.21.1 by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1147
+* Bump github/codeql-action from 2.21.6 to 2.21.7 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1150
+* Bump github/codeql-action from 2.21.7 to 2.21.8 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1152
+* Bump golang from `d3114db` to `a0b3bc4` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1155
+* Bump golang from `d3114db` to `a0b3bc4` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1157
+* Bump golang from `d3114db` to `a0b3bc4` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1156
+* Bump golang from `d3114db` to `a0b3bc4` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1158
+* Bump golang from `e06b3a4` to `114b9cc` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1159
+* Bump golang from `a0b3bc4` to `114b9cc` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1160
+* Bump golang from `a0b3bc4` to `114b9cc` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1161
+* Bump actions/checkout from 4.0.0 to 4.1.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1162
+* Bump golang from `114b9cc` to `9c7ea4a` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1163
+* Bump golang from `114b9cc` to `9c7ea4a` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1166
+* Bump golang from `114b9cc` to `9c7ea4a` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1165
+* Bump golang from `114b9cc` to `9c7ea4a` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1164
+* Bump github/codeql-action from 2.21.8 to 2.21.9 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1169
+* Bump golang from `9c7ea4a` to `61f84bc` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1168
+* Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1172
+* Bump golang from `9c7ea4a` to `61f84bc` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1170
+* Bump github.com/rs/cors from 1.10.0 to 1.10.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1176
+* Bump alpine from `7144f7b` to `eece025` in /trillian/examples/deployment/docker/envsubst by @dependabot in https://github.com/google/certificate-transparency-go/pull/1174
+* Bump alpine from `7144f7b` to `eece025` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1175
+* Bump golang from `9c7ea4a` to `61f84bc` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1171
+* Bump golang from `9c7ea4a` to `61f84bc` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1173
+* Bump distroless/base from `46c5b9b` to `a35b652` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1177
+* Bump golang.org/x/crypto from 0.13.0 to 0.14.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1178
+* Bump github/codeql-action from 2.21.9 to 2.22.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1180
+* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1181
+* Bump golang.org/x/net from 0.15.0 to 0.16.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1184
+* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1182
+* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1185
+* Bump golang from 1.21.1-bookworm to 1.21.2-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1183
+* Bump github/codeql-action from 2.22.0 to 2.22.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1186
+* Bump distroless/base from `a35b652` to `b31a6e0` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1188
+* Bump ossf/scorecard-action from 2.2.0 to 2.3.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1187
+* Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1189
+* Bump golang.org/x/net from 0.16.0 to 0.17.0 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1190
+* Bump go-version-input from 1.20.8 to 1.20.10 in govulncheck by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1195
+* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1193
+* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1191
+* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1194
+* Bump golang from 1.21.2-bookworm to 1.21.3-bookworm in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1192
+* Bump golang from `a94b089` to `8f9a1ec` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1196
+* Bump github/codeql-action from 2.22.1 to 2.22.2 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1197
+* Bump golang from `a94b089` to `5cc7ddc` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1200
+* Bump golang from `a94b089` to `5cc7ddc` in /internal/witness/cmd/witness by @dependabot in https://github.com/google/certificate-transparency-go/pull/1199
+* Bump github/codeql-action from 2.22.2 to 2.22.3 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1202
+* Bump golang from `5cc7ddc` to `20f9ab5` in /integration by @dependabot in https://github.com/google/certificate-transparency-go/pull/1203
+* Bump golang from `a94b089` to `20f9ab5` in /trillian/examples/deployment/docker/ctfe by @dependabot in https://github.com/google/certificate-transparency-go/pull/1198
+* Bump golang from `8f9a1ec` to `20f9ab5` in /internal/witness/cmd/feeder by @dependabot in https://github.com/google/certificate-transparency-go/pull/1201
+* Bump actions/checkout from 4.1.0 to 4.1.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1204
+* Bump github/codeql-action from 2.22.3 to 2.22.4 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1206
+* Bump ossf/scorecard-action from 2.3.0 to 2.3.1 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1207
+* Bump github/codeql-action from 2.22.4 to 2.22.5 by @dependabot in https://github.com/google/certificate-transparency-go/pull/1209
+* Bump multiple Go module dependencies by @roger2hk in https://github.com/google/certificate-transparency-go/pull/1213
+
+## v1.1.6
+
+### Dependency update
+
+* Bump Trillian to v1.5.2
+* Bump Prometheus to v0.43.1
+
+## v1.1.5
+
+### Public/Private Key Consistency
+
+ * #1044: If a public key has been configured for a log, check that it is consistent with the private key (a minimal sketch of such a check follows this list).
+ * #1046: Ensure that no two logs in the CTFE configuration use the same private key.
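+
+The sketch below illustrates such a consistency check using the standard
+`crypto/ecdsa` package; `keysConsistent` is a hypothetical name, not the
+CTFE's actual implementation.
+
+```go
+package main
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"fmt"
+)
+
+// keysConsistent reports whether a configured public key matches the private
+// key, by comparing it against the public half derived from the private key.
+func keysConsistent(priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey) bool {
+	return priv.PublicKey.Equal(pub)
+}
+
+func main() {
+	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	other, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	fmt.Println(keysConsistent(priv, &priv.PublicKey))  // true
+	fmt.Println(keysConsistent(priv, &other.PublicKey)) // false
+}
+```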
+
+### Cleanup
+
+ * Remove v2 log list package files.
+
+### Misc
+
+ * Updated golangci-lint to v1.51.1 (developers should update to this version).
+ * Bump Go version from 1.17 to 1.19.
+
+## v1.1.4
+
+[Published 2022-10-21](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.4)
+
+### Cleanup
+
+ * Remove log list v1 package and its dependencies.
+
+### Migrillian
+
+ * #960: Skip consistency check when root is size zero.
+
+### Misc
+
+ * Update Trillian to [0a389c4](https://github.com/google/trillian/commit/0a389c4bb8d97fb3be8f55d7e5b428cf4304986f)
+ * Migrate loglist dependency from v1 to v3 in ctclient cmd.
+ * Migrate loglist dependency from v1 to v3 in ctutil/loginfo.go
+ * Migrate loglist dependency from v1 to v3 in ctutil/sctscan.go
+ * Migrate loglist dependency from v1 to v3 in trillian/integration/ct_hammer/main.go
+ * Downgrade 429 errors to verbosity 2
+
+## v1.1.3
+
+[Published 2022-05-14](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.3)
+
+### Integration
+
+ * Breaking change to API for `integration.HammerCTLog`:
+ * Added `ctx` as the first argument; the loop now terminates if the context is cancelled.
+
+### JSONClient
+
+ * `PostAndParseWithRetry` now does backoff-and-retry upon receiving HTTP 429 (a sketch of the pattern follows).
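+
+The general pattern looks roughly like the sketch below; `postWithRetry` is a
+hypothetical helper for illustration, not the `jsonclient` internals.
+
+```go
+package jsonretry
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// postWithRetry retries a POST with exponential backoff while the server
+// answers HTTP 429 (Too Many Requests), up to a fixed number of attempts.
+func postWithRetry(client *http.Client, url string, attempts int) (*http.Response, error) {
+	backoff := time.Second
+	for i := 0; i < attempts; i++ {
+		rsp, err := client.Post(url, "application/json", nil)
+		if err != nil {
+			return nil, err
+		}
+		if rsp.StatusCode != http.StatusTooManyRequests {
+			return rsp, nil
+		}
+		rsp.Body.Close() // discard the 429 response before retrying
+		time.Sleep(backoff)
+		backoff *= 2
+	}
+	return nil, fmt.Errorf("still rate-limited after %d attempts", attempts)
+}
+```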
+
+### Cleanup
+
+ * Removed usage of the deprecated gRPC `WithBalancerName` API in favour of the recommended approach.
+ * `ctfe.PEMCertPool` type has been moved to `x509util.PEMCertPool` to reduce
+ dependencies (#903).
+
+### Misc
+
+ * Updated golangci-lint to v1.46.1 (developers should update to this version).
+ * Updated `google.golang.org/grpc` to v1.46.0.
+ * `ctclient` tool now uses Cobra for better CLI experience (#901).
+ * #800: Remove dependency on `ratelimit`.
+ * #927: Add read-only mode to CTFE config.
+
+## v1.1.2
+
+[Published 2021-09-21](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.2)
+
+### CTFE
+
+ * Removed the `-by_range` flag.
+
+### Updated dependencies
+
+ * Trillian from v1.3.11 to v1.4.0
+ * protobuf to v2
+
+## v1.1.1
+
+[Published 2020-10-06](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.1)
+
+### Tools
+
+#### CT Hammer
+
+Added a flag (`--strict_sth_consistency_size`) which, when set to true, enforces the current behaviour of only requesting consistency proofs between tree sizes for which the hammer has seen valid STHs.
+When this flag is set to false, if no two usable STHs are available, the hammer will attempt to request a consistency proof between the latest STH it's seen and a random smaller (but > 0) tree size.
+
+
+### CTFE
+
+#### Caching
+
+The CTFE now includes a Cache-Control header in responses containing purely
+immutable data, e.g. those for get-entries and get-proof-by-hash. This allows
+clients and proxies to cache these responses for up to 24 hours.
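+
+A minimal sketch of the pattern with a plain `net/http` middleware (an
+illustration, not the CTFE's actual code):
+
+```go
+package cachesketch
+
+import "net/http"
+
+// cacheImmutable wraps a handler whose responses never change (such as
+// get-entries and get-proof-by-hash) so that clients and proxies may
+// cache them for up to 24 hours (86400 seconds).
+func cacheImmutable(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Cache-Control", "public, max-age=86400")
+		h.ServeHTTP(w, r)
+	})
+}
+```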
+
+#### EKU Filtering
+
+> :warning: **It is not yet recommended to enable this option in a production CT Log!**
+
+CTFE now supports filtering logging submissions by leaf certificate EKU.
+This is enabled by adding an extKeyUsage list to a log's stanza in the
+config file.
+
+The format is a list of strings corresponding to the supported golang x509 EKUs:
+ |Config string | Extended Key Usage |
+ |----------------------------|----------------------------------------|
+ |`Any` | ExtKeyUsageAny |
+ |`ServerAuth` | ExtKeyUsageServerAuth |
+ |`ClientAuth` | ExtKeyUsageClientAuth |
+ |`CodeSigning` | ExtKeyUsageCodeSigning |
+ |`EmailProtection` | ExtKeyUsageEmailProtection |
+ |`IPSECEndSystem` | ExtKeyUsageIPSECEndSystem |
+ |`IPSECTunnel` | ExtKeyUsageIPSECTunnel |
+ |`IPSECUser` | ExtKeyUsageIPSECUser |
+ |`TimeStamping` | ExtKeyUsageTimeStamping |
+ |`OCSPSigning` | ExtKeyUsageOCSPSigning |
+ |`MicrosoftServerGatedCrypto`| ExtKeyUsageMicrosoftServerGatedCrypto |
+ |`NetscapeServerGatedCrypto` | ExtKeyUsageNetscapeServerGatedCrypto |
+
+When an extKeyUsage list is specified, the CT Log will reject logging
+submissions for leaf certificates that do not contain an EKU present in this
+list.
+
+When enabled, EKU filtering is only performed at the leaf level (i.e. there is
+no 'nested' EKU filtering performed).
+
+If no list is specified, or the list contains an `Any` entry, no EKU
+filtering will be performed.
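+
+A minimal sketch of the resulting check, written against the standard
+`crypto/x509` types for illustration (the CTFE uses its own `x509` fork, and
+`ekuAllowed` is a hypothetical name):
+
+```go
+package ekusketch
+
+import "crypto/x509"
+
+// ekuAllowed reports whether the leaf certificate carries at least one EKU
+// from the configured list. An empty list, or a list containing
+// x509.ExtKeyUsageAny, disables filtering, as described above.
+func ekuAllowed(leaf *x509.Certificate, allowed []x509.ExtKeyUsage) bool {
+	if len(allowed) == 0 {
+		return true
+	}
+	for _, a := range allowed {
+		if a == x509.ExtKeyUsageAny {
+			return true
+		}
+		for _, got := range leaf.ExtKeyUsage {
+			if got == a {
+				return true
+			}
+		}
+	}
+	return false
+}
+```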
+
+#### GetEntries
+
+Calls to `get-entries` that request at least the maximum permitted number of
+entries, and whose `start` parameter does not fall on a multiple of that
+maximum, will have their responses truncated so that subsequent requests
+align with this boundary.
+This is intended to coerce all callers of `get-entries` into using the same
+`start` and `end` parameters, thereby increasing the cacheability of
+these requests.
+
+e.g.:
+
+```
+Old behaviour:
+             1         2         3
+             0         0         0
+Entries>-----|---------|---------|----...
+Client A -------|---------|----------|...
+Client B --|--------|---------|-------...
+                ^         ^          ^
+                `---------`----------`---- requests
+
+With coercion (max batch = 10 entries):
+             1         2         3
+             0         0         0
+Entries>-----|---------|---------|----...
+Client A ----X---------|---------|...
+Client B --|-X---------|---------|-------...
+             ^
+             `-- Requests truncated
+```
+
+This behaviour can be disabled by setting the `--align_getentries`
+flag to false.
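+
+A sketch of the truncation arithmetic (`alignEnd` is a hypothetical helper;
+indices are inclusive as in RFC 6962, and this is not the exact CTFE logic):
+
+```go
+package alignsketch
+
+// alignEnd truncates the end index of a get-entries request so that the
+// following request starts on a multiple of maxBatch.
+func alignEnd(start, end, maxBatch int64) int64 {
+	boundary := (start/maxBatch + 1) * maxBatch // next boundary after start
+	if end >= boundary {
+		return boundary - 1
+	}
+	return end
+}
+```
+
+For example, with a maximum batch of 10 entries, a request for [4, 17] is
+truncated to [4, 9], so the client's next request starts at entry 10.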
+
+#### Flags
+
+The `ct_server` binary changed the default of these flags:
+
+- `by_range` - Now defaults to `true`
+
+The `ct_server` binary added the following flags:
+- `align_getentries` - See GetEntries section above for details
+
+Added `backend` flag to `migrillian`, which now replaces the deprecated
+"backend" feature of Migrillian configs.
+
+#### FixedBackendResolver Replaced
+
+This was previously used in situations where a comma separated list of
+backends was provided in the `rpcBackend` flag rather than a single value.
+
+It has been replaced by equivalent functionality using a newer gRPC API.
+However, this support was only intended for use in integration tests. In
+production we recommend the use of etcd or a gRPC load balancer.
+
+### LogList
+
+Log list tools updated to use the correct v2 URL (from v2_beta previously).
+
+### Libraries
+
+#### x509 fork
+
+Merged upstream Go 1.13 and Go 1.14 changes (with the exception
+of https://github.com/golang/go/commit/14521198679e, to allow
+old certs using a malformed root still to be logged).
+
+#### asn1 fork
+
+Merged upstream Go 1.14 changes.
+
+#### ctutil
+
+Added VerifySCTWithVerifier() to verify SCTs using a given ct.SignatureVerifier.
+
+### Configuration Files
+
+Configuration files that previously had to be text-encoded Protobuf messages
+can now alternatively be binary-encoded.
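+
+A sketch of accepting either encoding, written against the modern
+`google.golang.org/protobuf` API for illustration (the code of this era used
+the older `github.com/golang/protobuf`, so the real parsing differs):
+
+```go
+package configsketch
+
+import (
+	"google.golang.org/protobuf/encoding/prototext"
+	"google.golang.org/protobuf/proto"
+)
+
+// parseConfig accepts either a text-encoded or a binary-encoded protobuf
+// message, trying text first and falling back to binary.
+func parseConfig(data []byte, cfg proto.Message) error {
+	if err := prototext.Unmarshal(data, cfg); err == nil {
+		return nil
+	}
+	return proto.Unmarshal(data, cfg)
+}
+```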
+
+### JSONClient
+
+- `PostAndParseWithRetry` error logging now includes log URI in messages.
+
+### Minimal Gossip Example
+
+All the code for this, except for the x509ext package, has been moved over
+to the [trillian-examples](https://github.com/google/trillian-examples) repository.
+
+This keeps the code together and removes a circular dependency between the
+two repositories. The package layout and structure remains the same so
+updating should just mean changing any relevant import paths.
+
+### Dependencies
+
+A circular dependency on the [monologue](https://github.com/google/monologue) repository has been removed.
+
+A circular dependency on the [trillian-examples](https://github.com/google/trillian-examples) repository has been removed.
+
+The version of trillian in use has been updated to 1.3.11. This has required
+various other dependency updates including gRPC and protobuf. This code now
+uses the v2 proto API. The Travis tests now expect the 3.11.4 version of
+protoc.
+
+The version of etcd in use has been switched to the one from `go.etcd.io`.
+
+Most of the above changes are to align versions more closely with the ones
+used in the trillian repository.
+
+## v1.1.0
+
+Published 2019-11-14 15:00:00 +0000 UTC
+
+### CTFE
+
+The `reject_expired` and `reject_unexpired` configuration fields for the CTFE
+have been changed so that their behaviour reflects their name (a sketch of the
+resulting check follows the list):
+
+- `reject_expired` only rejects expired certificates (i.e. it now allows
+ not-yet-valid certificates).
+- `reject_unexpired` only allows expired certificates (i.e. it now rejects
+ not-yet-valid certificates).
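+
+A minimal sketch of the resulting check (`rejectByValidity` is a hypothetical
+name; "expired" here means `NotAfter` is in the past, so not-yet-valid
+certificates count as unexpired):
+
+```go
+package validitysketch
+
+import (
+	"crypto/x509"
+	"time"
+)
+
+// rejectByValidity applies the reject_expired / reject_unexpired semantics
+// described above to a leaf certificate, returning true if it should be
+// rejected.
+func rejectByValidity(leaf *x509.Certificate, rejectExpired, rejectUnexpired bool, now time.Time) bool {
+	expired := now.After(leaf.NotAfter)
+	if rejectExpired && expired {
+		return true
+	}
+	if rejectUnexpired && !expired {
+		return true
+	}
+	return false
+}
+```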
+
+A `reject_extensions` configuration field for the CTFE was added. This allows
+submissions to be rejected if they contain an extension with any of the
+specified OIDs.
+
+A `frozen_sth` configuration field for the CTFE was added. This STH will be
+served permanently. It must be signed by the log's private key.
+
+A `/healthz` URL has been added which responds with HTTP 200 OK and the string
+"ok" when the server is up.
+
+#### Flags
+
+The `ct_server` binary has these new flags:
+
+- `mask_internal_errors` - Removes error strings from HTTP 500 responses
+ (Internal Server Error)
+
+Removed default values for `--metrics_endpoint` and `--log_rpc_server` flags.
+This makes it easier to get the documented "unset" behaviour.
+
+#### Metrics
+
+The CTFE exports these new metrics:
+
+- `is_mirror` - set to 1 for mirror logs (copies of logs hosted elsewhere)
+- `frozen_sth_timestamp` - time of the frozen Signed Tree Head in milliseconds
+ since the epoch
+
+#### Kubernetes
+
+Updated prometheus-to-sd to v0.5.2.
+
+A dedicated node pool is no longer required by the Kubernetes manifests.
+
+### Log Lists
+
+A new package has been created for parsing, searching and creating JSON log
+lists compatible with the
+[v2 schema](http://www.gstatic.com/ct/log_list/v2_beta/log_list_schema.json):
+`github.com/google/certificate-transparency-go/loglist2`.
+
+### Docker Images
+
+Our Docker images have been updated to use Go 1.11 and
+[Distroless base images](https://github.com/GoogleContainerTools/distroless).
+
+The CTFE Docker image now sets `ENTRYPOINT`.
+
+### Utilities / Libraries
+
+#### jsonclient
+
+The `jsonclient` package now copes with empty HTTP responses. The user-agent
+header it sends can now be specified.
+
+#### x509 and asn1 forks
+
+Merged upstream changes from Go 1.12 into the `asn1` and `x509` packages.
+
+Added a "lax" tag to `asn1` that applies recursively and makes some checks more
+relaxed:
+
+- parsePrintableString() copes with invalid PrintableString contents, e.g. use
+ of tagPrintableString when the string data is really ISO8859-1.
+- checkInteger() allows integers that are not minimally encoded (and so are
+ not correct DER).
+- OIDs are allowed to be empty.
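+
+For illustration, a sketch under the assumption that the tag is written as
+`asn1:"lax"` on a struct field (the DER bytes below are illustrative):
+
+```go
+package laxsketch
+
+import (
+	"fmt"
+
+	"github.com/google/certificate-transparency-go/asn1"
+)
+
+// name has a single PrintableString field; the "lax" tag relaxes strict
+// DER checks for this field and, recursively, anything nested beneath it.
+type name struct {
+	Value string `asn1:"lax,printable"`
+}
+
+func parse() {
+	// SEQUENCE { PrintableString "a@" }: '@' is not a legal PrintableString
+	// character, so strict parsing rejects it, while lax parsing copes.
+	der := []byte{0x30, 0x04, 0x13, 0x02, 'a', '@'}
+	var n name
+	if _, err := asn1.Unmarshal(der, &n); err != nil {
+		fmt.Println("parse failed:", err)
+		return
+	}
+	fmt.Println("parsed:", n.Value)
+}
+```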
+
+The following `x509` functions will now return `x509.NonFatalErrors` if ASN.1
+parsing fails in strict mode but succeeds in lax mode. Previously, they only
+attempted strict-mode parsing. A sketch of handling this error follows the list.
+
+- `x509.ParseTBSCertificate()`
+- `x509.ParseCertificate()`
+- `x509.ParseCertificates()`
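+
+A handling sketch, assuming the fork returns an `x509.NonFatalErrors` value
+that can be detected with a type assertion (`parseLeniently` is a
+hypothetical name):
+
+```go
+package nfesketch
+
+import "github.com/google/certificate-transparency-go/x509"
+
+// parseLeniently returns a certificate even when strict-mode parsing
+// failed, as long as lax-mode parsing succeeded (signalled by the fork's
+// x509.NonFatalErrors); any other error remains fatal.
+func parseLeniently(der []byte) (*x509.Certificate, error) {
+	cert, err := x509.ParseCertificate(der)
+	if err != nil {
+		if _, ok := err.(x509.NonFatalErrors); ok {
+			return cert, nil // usable, though with strict-mode warnings
+		}
+		return nil, err
+	}
+	return cert, nil
+}
+```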
+
+The `x509` package will now treat a negative RSA modulus as a non-fatal error.
+
+The `x509` package now supports RSAES-OAEP and Ed25519 keys.
+
+#### ctclient
+
+The `ctclient` tool now defaults to using
+[all_logs_list.json](https://www.gstatic.com/ct/log_list/all_logs_list.json)
+instead of [log_list.json](https://www.gstatic.com/ct/log_list/log_list.json).
+This can be overridden using the `--log_list` flag.
+
+It can now perform inclusion checks on pre-certificates.
+
+It has these new commands:
+
+- `bisect` - Finds a log entry given a timestamp.
+
+It has these new flags:
+
+- `--chain` - Displays the entire certificate chain
+- `--dns_server` - The DNS server to direct queries to (system resolver by
+ default)
+- `--skip_https_verify` - Skips verification of the HTTPS connection
+- `--timestamp` - Timestamp to use for `bisect` and `inclusion` commands (for
+  `inclusion`, only if `--leaf_hash` is not used)
+
+It now accepts hex or base64-encoded strings for the `--tree_hash`,
+`--prev_hash` and `--leaf_hash` flags.
+
+#### certcheck
+
+The `certcheck` tool has these new flags:
+
+- `--check_time` - Check current validity of certificate (replaces
+ `--timecheck`)
+- `--check_name` - Check validity of certificate name
+- `--check_eku` - Check validity of EKU nesting
+- `--check_path_len` - Check validity of path length constraint
+- `--check_name_constraint` - Check name constraints
+- `--check_unknown_critical_exts` - Check for unknown critical extensions
+ (replaces `--ignore_unknown_critical_exts`)
+- `--strict` - Set non-zero exit code for non-fatal errors in parsing
+
+#### sctcheck
+
+The `sctcheck` tool has these new flags:
+
+- `--check_inclusion` - Checks that the SCT was honoured (i.e. the
+ corresponding certificate was included in the issuing CT log)
+
+#### ct_hammer
+
+The `ct_hammer` tool has these new flags:
+
+- `--duplicate_chance` - Allows setting the probability of the hammer sending
+ a duplicate submission.
+
+## v1.0.21 - CTFE Logging / Path Options. Mirroring. RPKI. Non Fatal X.509 error improvements
+
+Published 2018-08-20 10:11:04 +0000 UTC
+
+### CTFE
+
+`CTFE` no longer prints certificate chains as long byte strings in messages when handler errors occur. This was obscuring the reason for the failure and wasn't particularly useful.
+
+`CTFE` now has a global log URL path prefix flag and a configuration proto for a log specific path. The latter should help for various migration strategies if existing C++ server logs are going to be converted to run on the new code.
+
+### Mirroring
+
+More progress has been made on log mirroring. We believe that it's now at the point where testing can begin.
+
+### Utilities / Libraries
+
+The `certcheck` and `ct_hammer` utilities have received more enhancements.
+
+`x509` and `x509util` now support Subject Information Access and additional extensions for [RPKI / RFC 3779](https://www.ietf.org/rfc/rfc3779.txt).
+
+`scanner` / `fixchain` and some other command line utilities now have better handling of non-fatal errors.
+
+Commit [3629d6846518309d22c16fee15d1007262a459d2](https://api.github.com/repos/google/certificate-transparency-go/commits/3629d6846518309d22c16fee15d1007262a459d2) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.21)
+
+## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements
+
+Published 2018-07-05 09:21:34 +0000 UTC
+
+Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`.
+
+The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`.
+
+An implementation of Gossip has been added. See the `gossip/minimal` package for more information.
+
+An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10.
+
+Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20)
+
+## v1.0.19 - CTFE User Quota
+
+Published 2018-06-01 13:51:52 +0000 UTC
+
+CTFE now supports Trillian Log's explicit quota API; quota can be requested based on the remote user's IP, as well as per-issuing certificate in submitted chains.
+
+Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/google/certificate-transparency-go/commits/8736a411b4ff214ea20687e46c2b67d66ebd83fc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.19)
+
+## v1.0.18 - Adding Migration Tool / Client Additions / K8 Config
+
+Published 2018-06-01 14:28:20 +0000 UTC
+
+Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs.
+
+The `RequestLog` API allows for logging of SCTs when they are issued by CTFE.
+
+The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package.
+
+Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18)
+
+## v1.0.17 - Merkle verification / Tracing / Demo script / CORS
+
+Published 2018-06-01 14:25:16 +0000 UTC
+
+Now uses Merkle Tree verification from Trillian.
+
+The CT server now supports CORS.
+
+Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
+
+A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project.
+
+Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17)
+
+## v1.0.16 - Lifecycle test / Go 1.10.1
+
+Published 2018-06-01 14:22:23 +0000 UTC
+
+An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
+
+Changes to `x509` were merged from Go 1.10.1.
+
+Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16)
+
+## v1.0.15 - More control of verification, grpclb, stackdriver metrics
+
+Published 2018-06-01 14:20:32 +0000 UTC
+
+Facilities were added to the `x509` package to control whether verification checks are applied.
+
+Log server requests are now balanced using `gRPClb`.
+
+For Kubernetes, metrics can be published to Stackdriver monitoring.
+
+Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15)
+
+## v1.0.14 - SQLite Removed, LeafHashForLeaf
+
+Published 2018-06-01 14:15:37 +0000 UTC
+
+Support for SQLite was removed. The motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
+
+A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests.
+
+Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14)
+
+## v1.0.13 - Crypto changes, util updates, sync with trillian repo, loglist verification
+
+Published 2018-06-01 14:15:21 +0000 UTC
+
+Some of our custom crypto packages that wrapped calls to the standard library have been removed, and the base features are now used directly.
+
+Updates were made to GCE ingress and health checks.
+
+The log list utility can verify signatures.
+
+Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13)
+
+## v1.0.12 - Client / util updates & CTFE fixes
+
+Published 2018-06-01 14:13:42 +0000 UTC
+
+The CT client can now use a JSON loglist to find logs.
+
+CTFE had a fix applied for preissued precerts.
+
+A DNS client was added and CT client was extended to support DNS retrieval.
+
+Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12)
+
+## v1.0.11 - Kubernetes CI / Integration fixes
+
+Published 2018-06-01 14:12:18 +0000 UTC
+
+Updates to Kubernetes configs, mostly related to running a CI instance.
+
+Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/google/certificate-transparency-go/commits/0856acca7e0ab7f082ae83a1fbb5d21160962efc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.11)
+
+## v1.0.10 - More scanner, x509, utility and client fixes. CTFE updates
+
+Published 2018-06-01 14:09:47 +0000 UTC
+
+The CT client was using the wrong protobuf library package. To guard against this in the future, a check has been added to our lint config.
+
+The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1.
+
+Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10)
+
+## v1.0.9 - Scanner, x509, utility and client fixes
+
+Published 2018-06-01 14:11:13 +0000 UTC
+
+The `scanner` utility now displays throughput stats.
+
+Build instructions and README files were updated.
+
+The `certcheck` utility can be told to ignore unknown critical X.509 extensions.
+
+Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9)
+
+## v1.0.8 - Client fixes, align with trillian repo
+
+Published 2018-06-01 14:06:44 +0000 UTC
+
+
+
+Commit [e8b02c60f294b503dbb67de0868143f5d4935e56](https://api.github.com/repos/google/certificate-transparency-go/commits/e8b02c60f294b503dbb67de0868143f5d4935e56) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.8)
+
+## v1.0.7 - CTFE fixes
+
+Published 2018-06-01 14:06:13 +0000 UTC
+
+An issue was fixed with CTFE signature caching. In an unlikely set of circumstances this could lead to log mis-operation. While the chances of this are small, we recommend that versions prior to this one are not deployed.
+
+Commit [52c0590bd3b4b80c5497005b0f47e10557425eeb](https://api.github.com/repos/google/certificate-transparency-go/commits/52c0590bd3b4b80c5497005b0f47e10557425eeb) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.7)
+
+## v1.0.6 - crlcheck improvements / other fixes
+
+Published 2018-06-01 14:04:22 +0000 UTC
+
+The `crlcheck` utility has had several fixes and enhancements. Additionally, the `hammer` now supports temporal logs.
+
+Commit [3955e4a00c42e83ff17ce25003976159c5d0f0f9](https://api.github.com/repos/google/certificate-transparency-go/commits/3955e4a00c42e83ff17ce25003976159c5d0f0f9) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.6)
+
+## v1.0.5 - X509 and asn1 fixes
+
+Published 2018-06-01 14:02:58 +0000 UTC
+
+This release is mostly fixes to the `x509` and `asn1` packages. Some command line utilities were also updated.
+
+Commit [ae40d07cce12f1227c6e658e61c9dddb7646f97b](https://api.github.com/repos/google/certificate-transparency-go/commits/ae40d07cce12f1227c6e658e61c9dddb7646f97b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.5)
+
+## v1.0.4 - Multi log backend configs
+
+Published 2018-06-01 14:02:07 +0000 UTC
+
+Support was added to allow CTFE to use multiple backends, each serving a distinct set of logs. It allows for e.g. regional backend deployment with common frontend servers.
+
+Commit [62023ed90b41fa40854957b5dec7d9d73594723f](https://api.github.com/repos/google/certificate-transparency-go/commits/62023ed90b41fa40854957b5dec7d9d73594723f) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.4)
+
+## v1.0.3 - Hammer updates, use standard context
+
+Published 2018-06-01 14:01:11 +0000 UTC
+
+After the Go 1.9 migration, references to anything other than the standard `context` package have been removed. This is the only context package that should be used from now on.
+
+Commit [b28beed8b9aceacc705e0ff4a11d435a310e3d97](https://api.github.com/repos/google/certificate-transparency-go/commits/b28beed8b9aceacc705e0ff4a11d435a310e3d97) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.3)
+
+## v1.0.2 - Go 1.9
+
+Published 2018-06-01 14:00:00 +0000 UTC
+
+Go 1.9 is now required to build the code.
+
+Commit [3aed33d672ee43f04b1e8a00b25ca3e2e2e74309](https://api.github.com/repos/google/certificate-transparency-go/commits/3aed33d672ee43f04b1e8a00b25ca3e2e2e74309) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.2)
+
+## v1.0.1 - Hammer and client improvements
+
+Published 2018-06-01 13:59:29 +0000 UTC
+
+
+
+Commit [c28796cc21776667fb05d6300e32d9517be96515](https://api.github.com/repos/google/certificate-transparency-go/commits/c28796cc21776667fb05d6300e32d9517be96515) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.1)
+
+## v1.0 - First Trillian CT Release
+
+Published 2018-06-01 13:59:00 +0000 UTC
+
+This is the point that corresponds to the 1.0 release in the trillian repo.
+
+Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0)
diff --git a/vendor/github.com/google/certificate-transparency-go/CODEOWNERS b/vendor/github.com/google/certificate-transparency-go/CODEOWNERS
new file mode 100644
index 00000000000..0c931e87ce2
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/CODEOWNERS
@@ -0,0 +1 @@
+* @google/certificate-transparency
diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md
new file mode 100644
index 00000000000..43de4c9d470
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**; it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+ own the intellectual property, then you'll need to sign an [individual
+ CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+ then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+Once your CLA is submitted (or if you already submitted one for
+another Google project), make a commit adding yourself to the
+[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
+of your first [pull request][].
+
+[AUTHORS]: AUTHORS
+[CONTRIBUTORS]: CONTRIBUTORS
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+ messages are able to be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
new file mode 100644
index 00000000000..3a98a7e1ef5
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
@@ -0,0 +1,62 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+#
+# Names should be added to this file as:
+# Name
+#
+# Please keep the list sorted.
+
+Adam Eijdenberg
+Al Cutter
+Alex Cohn
+Ben Laurie
+Chris Kennelly
+David Drysdale
+Deyan Bektchiev
+Ed Maste
+Elisha Silas
+Emilia Kasper
+Eran Messeri
+Fiaz Hossain
+Gary Belvin
+Jeff Trawick
+Joe Tsai
+Kat Joyce
+Katriel Cohn-Gordon
+Kiril Nikolov
+Konrad Kraszewski
+Laël Cellier
+Linus Nordberg
+Mark Schloesser
+Nicholas Galbreath
+Oliver Weidner
+Pascal Leroy
+Paul Hadfield
+Paul Lietar
+Pavel Kalinnikov
+Pierre Phaneuf
+Rob Percival
+Rob Stradling
+Roger Ng
+Roland Shoemaker
+Ruslan Kovalov
+Samuel Lidén Borell
+Tatiana Merkulova
+Vladimir Rutsky
+Ximin Luo
diff --git a/vendor/github.com/google/certificate-transparency-go/LICENSE b/vendor/github.com/google/certificate-transparency-go/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000000..c3c0feb3abc
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,16 @@
+
+
+### Checklist
+
+
+
+- [ ] I have updated the [CHANGELOG](CHANGELOG.md).
+ - Adjust the draft version number according to [semantic versioning](https://semver.org/) rules.
+- [ ] I have updated [documentation](docs/) accordingly.
diff --git a/vendor/github.com/google/certificate-transparency-go/README.md b/vendor/github.com/google/certificate-transparency-go/README.md
new file mode 100644
index 00000000000..bade700508e
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/README.md
@@ -0,0 +1,120 @@
+# Certificate Transparency: Go Code
+
+[Go Report Card](https://goreportcard.com/report/github.com/google/certificate-transparency-go)
+[GoDoc](https://godoc.org/github.com/google/certificate-transparency-go)
+
+
+This repository holds Go code related to
+[Certificate Transparency](https://www.certificate-transparency.org/) (CT). The
+repository requires Go version 1.23.
+
+ - [Repository Structure](#repository-structure)
+ - [Trillian CT Personality](#trillian-ct-personality)
+ - [Working on the Code](#working-on-the-code)
+ - [Running Codebase Checks](#running-codebase-checks)
+ - [Rebuilding Generated Code](#rebuilding-generated-code)
+
+## Support
+
+- Slack: https://transparency-dev.slack.com/ ([invitation](https://join.slack.com/t/transparency-dev/shared_invite/zt-27pkqo21d-okUFhur7YZ0rFoJVIOPznQ))
+
+## Repository Structure
+
+The main parts of the repository are:
+
+ - Encoding libraries:
+ - `asn1/` and `x509/` are forks of the upstream Go `encoding/asn1` and
+ `crypto/x509` libraries. We maintain separate forks of these packages
+ because CT is intended to act as an observatory of certificates across the
+ ecosystem; as such, we need to be able to process somewhat-malformed
+ certificates that the stricter upstream code would (correctly) reject.
+ Our `x509` fork also includes code for working with the
+ [pre-certificates defined in RFC 6962](https://tools.ietf.org/html/rfc6962#section-3.1).
+ - `tls` holds a library for processing TLS-encoded data as described in
+ [RFC 5246](https://tools.ietf.org/html/rfc5246).
+ - `x509util/` provides additional utilities for dealing with
+ `x509.Certificate`s.
+ - CT client libraries:
+ - The top-level `ct` package (in `.`) holds types and utilities for working
+ with CT data structures defined in
+ [RFC 6962](https://tools.ietf.org/html/rfc6962).
+ - `client/` and `jsonclient/` hold libraries that allow access to CT Logs
+ via HTTP entrypoints described in
+ [section 4 of RFC 6962](https://tools.ietf.org/html/rfc6962#section-4);
+ a short usage sketch follows this list.
+ - `dnsclient/` has a library that allows access to CT Logs over
+ [DNS](https://github.com/google/certificate-transparency-rfcs/blob/master/dns/draft-ct-over-dns.md).
+ - `scanner/` holds a library for scanning the entire contents of an existing
+ CT Log.
+ - CT Personality for [Trillian](https://github.com/google/trillian):
+ - `trillian/` holds code that allows a Certificate Transparency Log to be
+ run using a Trillian Log as its back-end -- see
+ [below](#trillian-ct-personality).
+ - Command line tools:
+ - `./client/ctclient` allows interaction with a CT Log.
+ - `./ctutil/sctcheck` allows SCTs (signed certificate timestamps) from a CT
+ Log to be verified.
+ - `./scanner/scanlog` allows an existing CT Log to be scanned for certificates
+ of interest; please be polite when running this tool against a Log.
+ - `./x509util/certcheck` allows display and verification of certificates.
+ - `./x509util/crlcheck` allows display and verification of certificate
+ revocation lists (CRLs).
+ - Other libraries related to CT:
+ - `ctutil/` holds utility functions for validating and verifying CT data
+ structures.
+ - `loglist3/` has a library for reading
+ [v3 JSON lists of CT Logs](https://groups.google.com/a/chromium.org/g/ct-policy/c/IdbrdAcDQto/m/i5KPyzYwBAAJ).
+
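+As a quick illustration of the client libraries above, here is a minimal
+sketch that fetches a log's current Signed Tree Head (STH). It is
+illustrative only: the log URL is a placeholder, and errors are simply
+surfaced via `panic`.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/google/certificate-transparency-go/client"
+	"github.com/google/certificate-transparency-go/jsonclient"
+)
+
+func main() {
+	// Placeholder URL; substitute the base URL of a real CT log.
+	lc, err := client.New("https://ct.example.com/log", http.DefaultClient, jsonclient.Options{})
+	if err != nil {
+		panic(err)
+	}
+	// Fetch the log's current Signed Tree Head.
+	sth, err := lc.GetSTH(context.Background())
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("tree size: %d, root hash: %x\n", sth.TreeSize, sth.SHA256RootHash)
+}
+```
+
+Other log entrypoints on `client.LogClient` (such as `GetEntries` and
+`GetProofByHash`) follow the same pattern.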
+
+## Trillian CT Personality
+
+The `trillian/` subdirectory holds code and scripts for running a CT Log based
+on the [Trillian](https://github.com/google/trillian) general transparency Log,
+and is [documented separately](trillian/README.md).
+
+
+## Working on the Code
+
+Developers who want to make changes to the codebase need some additional
+dependencies and tools, described in the following sections.
+
+### Running Codebase Checks
+
+The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
+and tests over the codebase; please ensure this script passes before sending
+pull requests for review.
+
+```bash
+# Install golangci-lint
+go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0
+
+# Run code generation, build, test and linters
+./scripts/presubmit.sh
+
+# Run build, test and linters but skip code generation
+./scripts/presubmit.sh --no-generate
+
+# Or just run the linters alone:
+golangci-lint run
+```
+
+### Rebuilding Generated Code
+
+Some of the CT Go code is autogenerated from other files:
+
+- [Protocol buffer](https://developers.google.com/protocol-buffers/) message
+ definitions are converted to `.pb.go` implementations.
+- A mock implementation of the Trillian gRPC API (in `trillian/mockclient`) is
+ created with [GoMock](https://github.com/golang/mock).
+
+Re-generating mock or protocol buffer files is only needed if you're changing
+the original files; if you do, you'll need to install the prerequisites:
+
+- tools written in Go can be installed with a single run of `go install`
+ (courtesy of [`tools.go`](./tools/tools.go) and `go.mod`).
+- `protoc` tool: you'll need [version 3.20.1](https://github.com/protocolbuffers/protobuf/releases/tag/v3.20.1) installed, and `PATH` updated to include its `bin/` directory.
+
+With tools installed, run the following:
+
+```bash
+go generate -x ./... # hunts for //go:generate comments and runs them
+```
diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/README.md b/vendor/github.com/google/certificate-transparency-go/asn1/README.md
new file mode 100644
index 00000000000..a42ac4ebe33
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/asn1/README.md
@@ -0,0 +1,7 @@
+# Important Notice
+
+This is a fork of the `encoding/asn1` Go package. The original source can be found on
+[GitHub](https://github.com/golang/go).
+
+Be careful about making local modifications to this code, as they will
+make future maintenance harder.
diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go b/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go
new file mode 100644
index 00000000000..aaca5fd2606
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go
@@ -0,0 +1,1195 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
+// as defined in ITU-T Rec X.690.
+//
+// See also “A Layman's Guide to a Subset of ASN.1, BER, and DER,”
+// http://luca.ntop.org/Teaching/Appunti/asn1.html.
+//
+// This is a fork of the Go standard library ASN.1 implementation
+// (encoding/asn1), with the aim of relaxing checks for various things
+// that are common errors present in many X.509 certificates in the
+// wild.
+//
+// Main differences:
+// - Extra "lax" tag that recursively applies and relaxes some strict
+// checks:
+// - parsePrintableString() copes with invalid PrintableString contents,
+// e.g. use of tagPrintableString when the string data is really
+// ISO8859-1.
+// - checkInteger() allows integers that are not minimally encoded (and
+// so are not correct DER).
+// - parseObjectIdentifier() allows zero-length OIDs.
+// - Better diagnostics on which particular field causes errors.
+package asn1
+
+// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER, etc.
+// are different encoding formats for those objects. Here, we'll be dealing
+// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
+// it's fast to parse and, unlike BER, has a unique encoding for every object.
+// When calculating hashes over objects, it's important that the resulting
+// bytes be the same at both ends and DER removes this margin of error.
+//
+// ASN.1 is very complex and this package doesn't attempt to implement
+// everything by any means.
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "reflect"
+ "strconv"
+ "time"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// A StructuralError suggests that the ASN.1 data is valid, but the Go type
+// which is receiving it doesn't match.
+type StructuralError struct {
+ Msg string
+ Field string
+}
+
+func (e StructuralError) Error() string {
+ var prefix string
+ if e.Field != "" {
+ prefix = e.Field + ": "
+ }
+ return "asn1: structure error: " + prefix + e.Msg
+}
+
+// A SyntaxError suggests that the ASN.1 data is invalid.
+type SyntaxError struct {
+ Msg string
+ Field string
+}
+
+func (e SyntaxError) Error() string {
+ var prefix string
+ if e.Field != "" {
+ prefix = e.Field + ": "
+ }
+ return "asn1: syntax error: " + prefix + e.Msg
+}
+
+// We start by dealing with each of the primitive types in turn.
+
+// BOOLEAN
+
+func parseBool(bytes []byte, fieldName string) (ret bool, err error) {
+ if len(bytes) != 1 {
+ err = SyntaxError{"invalid boolean", fieldName}
+ return
+ }
+
+ // DER demands that "If the encoding represents the boolean value TRUE,
+ // its single contents octet shall have all eight bits set to one."
+ // Thus only 0 and 255 are valid encoded values.
+ switch bytes[0] {
+ case 0:
+ ret = false
+ case 0xff:
+ ret = true
+ default:
+ err = SyntaxError{"invalid boolean", fieldName}
+ }
+
+ return
+}
+
+// INTEGER
+
+// checkInteger returns nil if the given bytes are a valid DER-encoded
+// INTEGER and an error otherwise.
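+// For example, 0x00 0x7f is rejected as not minimally encoded (the leading
+// zero octet is redundant), while 0x00 0x80 is accepted (the zero octet is
+// needed to keep the value positive).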
+func checkInteger(bytes []byte, lax bool, fieldName string) error {
+ if len(bytes) == 0 {
+ return StructuralError{"empty integer", fieldName}
+ }
+ if len(bytes) == 1 {
+ return nil
+ }
+ if lax {
+ return nil
+ }
+ if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
+ return StructuralError{"integer not minimally-encoded", fieldName}
+ }
+ return nil
+}
+
+// parseInt64 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt64(bytes []byte, lax bool, fieldName string) (ret int64, err error) {
+ err = checkInteger(bytes, lax, fieldName)
+ if err != nil {
+ return
+ }
+ if len(bytes) > 8 {
+ // We'll overflow an int64 in this case.
+ err = StructuralError{"integer too large", fieldName}
+ return
+ }
+ for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+ ret <<= 8
+ ret |= int64(bytes[bytesRead])
+ }
+
+ // Shift up and down in order to sign extend the result.
+ ret <<= 64 - uint8(len(bytes))*8
+ ret >>= 64 - uint8(len(bytes))*8
+ return
+}
+
+// parseInt32 treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseInt32(bytes []byte, lax bool, fieldName string) (int32, error) {
+ if err := checkInteger(bytes, lax, fieldName); err != nil {
+ return 0, err
+ }
+ ret64, err := parseInt64(bytes, lax, fieldName)
+ if err != nil {
+ return 0, err
+ }
+ if ret64 != int64(int32(ret64)) {
+ return 0, StructuralError{"integer too large", fieldName}
+ }
+ return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte, lax bool, fieldName string) (*big.Int, error) {
+ if err := checkInteger(bytes, lax, fieldName); err != nil {
+ return nil, err
+ }
+ ret := new(big.Int)
+ if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+ // This is a negative number.
+ notBytes := make([]byte, len(bytes))
+ for i := range notBytes {
+ notBytes[i] = ^bytes[i]
+ }
+ ret.SetBytes(notBytes)
+ ret.Add(ret, bigOne)
+ ret.Neg(ret)
+ return ret, nil
+ }
+ ret.SetBytes(bytes)
+ return ret, nil
+}
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+ Bytes []byte // bits packed into bytes.
+ BitLength int // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns 0.
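+// For example, for the bytes 0x80 0x40, At(0) == 1 and At(9) == 1.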
+func (b BitString) At(i int) int {
+ if i < 0 || i >= b.BitLength {
+ return 0
+ }
+ x := i / 8
+ y := 7 - uint(i%8)
+ return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
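+// For example, with BitLength 12 and bytes 0xab 0xc0, the right-aligned
+// result is 0x0a 0xbc.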
+func (b BitString) RightAlign() []byte {
+ shift := uint(8 - (b.BitLength % 8))
+ if shift == 8 || len(b.Bytes) == 0 {
+ return b.Bytes
+ }
+
+ a := make([]byte, len(b.Bytes))
+ a[0] = b.Bytes[0] >> shift
+ for i := 1; i < len(b.Bytes); i++ {
+ a[i] = b.Bytes[i-1] << (8 - shift)
+ a[i] |= b.Bytes[i] >> shift
+ }
+
+ return a
+}
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte, fieldName string) (ret BitString, err error) {
+ if len(bytes) == 0 {
+ err = SyntaxError{"zero length BIT STRING", fieldName}
+ return
+ }
+ paddingBits := int(bytes[0])
+ if paddingBits > 7 ||
+ len(bytes) == 1 && paddingBits > 0 ||
+ bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+ err = SyntaxError{"invalid padding bits in BIT STRING", fieldName}
+ return
+ }
+ ret.BitLength = (len(bytes)-1)*8 - paddingBits
+ ret.Bytes = bytes[1:]
+ return
+}
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+ if len(oi) != len(other) {
+ return false
+ }
+ for i := 0; i < len(oi); i++ {
+ if oi[i] != other[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (oi ObjectIdentifier) String() string {
+ var s string
+
+ for i, v := range oi {
+ if i > 0 {
+ s += "."
+ }
+ s += strconv.Itoa(v)
+ }
+
+ return s
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte, lax bool, fieldName string) (s ObjectIdentifier, err error) {
+ if len(bytes) == 0 {
+ if lax {
+ return ObjectIdentifier{}, nil
+ }
+ err = SyntaxError{"zero length OBJECT IDENTIFIER", fieldName}
+ return
+ }
+
+ // In the worst case, we get two elements from the first byte (which is
+ // encoded differently) and then every varint is a single byte long.
+ s = make([]int, len(bytes)+1)
+
+ // The first varint is 40*value1 + value2:
+ // According to this packing, value1 can take the values 0, 1 and 2 only.
+ // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+ // then there are no restrictions on value2.
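+ // For example, the single octet 0x2a (42) unpacks to "1.2" (40*1 + 2).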
+ v, offset, err := parseBase128Int(bytes, 0, fieldName)
+ if err != nil {
+ return
+ }
+ if v < 80 {
+ s[0] = v / 40
+ s[1] = v % 40
+ } else {
+ s[0] = 2
+ s[1] = v - 80
+ }
+
+ i := 2
+ for ; offset < len(bytes); i++ {
+ v, offset, err = parseBase128Int(bytes, offset, fieldName)
+ if err != nil {
+ return
+ }
+ s[i] = v
+ }
+ s = s[0:i]
+ return
+}
+
+// ENUMERATED
+
+// An Enumerated is represented as a plain int.
+type Enumerated int
+
+// FLAG
+
+// A Flag accepts any data and is set to true if present.
+type Flag bool
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
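+// For example, the bytes 0x81 0x01 at offset 0 decode to the value 129,
+// with a new offset of 2.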
+func parseBase128Int(bytes []byte, initOffset int, fieldName string) (ret, offset int, err error) {
+ offset = initOffset
+ var ret64 int64
+ for shifted := 0; offset < len(bytes); shifted++ {
+ // 5 * 7 bits per byte == 35 bits of data
+ // Thus the representation is either non-minimal or too large for an int32
+ if shifted == 5 {
+ err = StructuralError{"base 128 integer too large", fieldName}
+ return
+ }
+ ret64 <<= 7
+ b := bytes[offset]
+ ret64 |= int64(b & 0x7f)
+ offset++
+ if b&0x80 == 0 {
+ ret = int(ret64)
+ // Ensure that the returned value fits in an int on all platforms
+ if ret64 > math.MaxInt32 {
+ err = StructuralError{"base 128 integer too large", fieldName}
+ }
+ return
+ }
+ }
+ err = SyntaxError{"truncated base 128 integer", fieldName}
+ return
+}
+
+// UTCTime
+
+func parseUTCTime(bytes []byte) (ret time.Time, err error) {
+ s := string(bytes)
+
+ formatStr := "0601021504Z0700"
+ ret, err = time.Parse(formatStr, s)
+ if err != nil {
+ formatStr = "060102150405Z0700"
+ ret, err = time.Parse(formatStr, s)
+ }
+ if err != nil {
+ return
+ }
+
+ if serialized := ret.Format(formatStr); serialized != s {
+ err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+ return
+ }
+
+ if ret.Year() >= 2050 {
+ // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
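+ // For example, "500101000000Z" is interpreted as 1950-01-01, not 2050-01-01.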
+ ret = ret.AddDate(-100, 0, 0)
+ }
+
+ return
+}
+
+// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
+// and returns the resulting time.
+func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
+ const formatStr = "20060102150405Z0700"
+ s := string(bytes)
+
+ if ret, err = time.Parse(formatStr, s); err != nil {
+ return
+ }
+
+ if serialized := ret.Format(formatStr); serialized != s {
+ err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+ }
+
+ return
+}
+
+// NumericString
+
+// parseNumericString parses an ASN.1 NumericString from the given byte array
+// and returns it.
+func parseNumericString(bytes []byte, fieldName string) (ret string, err error) {
+ for _, b := range bytes {
+ if !isNumeric(b) {
+ return "", SyntaxError{"NumericString contains invalid character", fieldName}
+ }
+ }
+ return string(bytes), nil
+}
+
+// isNumeric reports whether the given b is in the ASN.1 NumericString set.
+func isNumeric(b byte) bool {
+ return '0' <= b && b <= '9' ||
+ b == ' '
+}
+
+// PrintableString
+
+// parsePrintableString parses an ASN.1 PrintableString from the given byte
+// array and returns it.
+func parsePrintableString(bytes []byte, lax bool, fieldName string) (ret string, err error) {
+ for _, b := range bytes {
+ if !isPrintable(b, allowAsterisk, allowAmpersand) {
+ if !lax {
+ err = SyntaxError{"PrintableString contains invalid character", fieldName}
+ } else {
+ // Might be an ISO8859-1 string stuffed in, check if it
+ // would be valid and assume that's what's happened if so,
+ // otherwise try T.61, failing that give up and just assign
+ // the bytes
+ switch {
+ case couldBeISO8859_1(bytes):
+ ret, err = iso8859_1ToUTF8(bytes), nil
+ case couldBeT61(bytes):
+ ret, err = parseT61String(bytes)
+ default:
+ err = SyntaxError{"PrintableString contains invalid character, couldn't determine correct String type", fieldName}
+ }
+ }
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+type asteriskFlag bool
+type ampersandFlag bool
+
+const (
+ allowAsterisk asteriskFlag = true
+ rejectAsterisk asteriskFlag = false
+
+ allowAmpersand ampersandFlag = true
+ rejectAmpersand ampersandFlag = false
+)
+
+// isPrintable reports whether the given b is in the ASN.1 PrintableString set.
+// If asterisk is allowAsterisk then '*' is also allowed, reflecting existing
+// practice. If ampersand is allowAmpersand then '&' is allowed as well.
+func isPrintable(b byte, asterisk asteriskFlag, ampersand ampersandFlag) bool {
+ return 'a' <= b && b <= 'z' ||
+ 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' ||
+ '\'' <= b && b <= ')' ||
+ '+' <= b && b <= '/' ||
+ b == ' ' ||
+ b == ':' ||
+ b == '=' ||
+ b == '?' ||
+ // This is technically not allowed in a PrintableString.
+ // However, x509 certificates with wildcard strings don't
+ // always use the correct string type so we permit it.
+ (bool(asterisk) && b == '*') ||
+ // This is not technically allowed either. However, not
+ // only is it relatively common, but there are also a
+ // handful of CA certificates that contain it. At least
+ // one of which will not expire until 2027.
+ (bool(ampersand) && b == '&')
+}
+
+// IA5String
+
+// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
+// byte slice and returns it.
+func parseIA5String(bytes []byte, fieldName string) (ret string, err error) {
+ for _, b := range bytes {
+ if b >= utf8.RuneSelf {
+ err = SyntaxError{"IA5String contains invalid character", fieldName}
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+// T61String
+
+// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
+// byte slice and returns it.
+func parseT61String(bytes []byte) (ret string, err error) {
+ return string(bytes), nil
+}
+
+// UTF8String
+
+// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
+// array and returns it.
+func parseUTF8String(bytes []byte) (ret string, err error) {
+ if !utf8.Valid(bytes) {
+ return "", errors.New("asn1: invalid UTF-8 string")
+ }
+ return string(bytes), nil
+}
+
+// BMPString
+
+// parseBMPString parses an ASN.1 BMPString (Basic Multilingual Plane of
+// ISO/IEC/ITU 10646-1) from the given byte slice and returns it.
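+// Each pair of bytes is a big-endian UTF-16 code unit; for example, the
+// bytes 0x00 0x41 decode to "A".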
+func parseBMPString(bmpString []byte) (string, error) {
+ if len(bmpString)%2 != 0 {
+ return "", errors.New("pkcs12: odd-length BMP string")
+ }
+
+ // Strip terminator if present.
+ if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+ bmpString = bmpString[:l-2]
+ }
+
+ s := make([]uint16, 0, len(bmpString)/2)
+ for len(bmpString) > 0 {
+ s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+ bmpString = bmpString[2:]
+ }
+
+ return string(utf16.Decode(s)), nil
+}
+
+// A RawValue represents an undecoded ASN.1 object.
+type RawValue struct {
+ Class, Tag int
+ IsCompound bool
+ Bytes []byte
+ FullBytes []byte // includes the tag and length
+}
+
+// RawContent is used to signal that the undecoded, DER data needs to be
+// preserved for a struct. To use it, the first field of the struct must have
+// this type. It's an error for any of the other fields to have this type.
+type RawContent []byte
+
+// Tagging
+
+// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
+// into a byte slice. It returns the parsed data and the new offset. SET and
+// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
+// don't distinguish between ordered and unordered objects in this code.
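+// For example, the header bytes 0x30 0x82 0x01 0x00 describe a constructed
+// SEQUENCE whose contents are 256 bytes long.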
+func parseTagAndLength(bytes []byte, initOffset int, fieldName string) (ret tagAndLength, offset int, err error) {
+ offset = initOffset
+ // parseTagAndLength should not be called without at least a single
+ // byte to read. Thus this check is for robustness:
+ if offset >= len(bytes) {
+ err = errors.New("asn1: internal error in parseTagAndLength")
+ return
+ }
+ b := bytes[offset]
+ offset++
+ ret.class = int(b >> 6)
+ ret.isCompound = b&0x20 == 0x20
+ ret.tag = int(b & 0x1f)
+
+ // If the bottom five bits are set, then the tag number is actually base 128
+ // encoded afterwards
+ if ret.tag == 0x1f {
+ ret.tag, offset, err = parseBase128Int(bytes, offset, fieldName)
+ if err != nil {
+ return
+ }
+ // Tags should be encoded in minimal form.
+ if ret.tag < 0x1f {
+ err = SyntaxError{"non-minimal tag", fieldName}
+ return
+ }
+ }
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length", fieldName}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ if b&0x80 == 0 {
+ // The length is encoded in the bottom 7 bits.
+ ret.length = int(b & 0x7f)
+ } else {
+ // Bottom 7 bits give the number of length bytes to follow.
+ numBytes := int(b & 0x7f)
+ if numBytes == 0 {
+ err = SyntaxError{"indefinite length found (not DER)", fieldName}
+ return
+ }
+ ret.length = 0
+ for i := 0; i < numBytes; i++ {
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length", fieldName}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ if ret.length >= 1<<23 {
+ // We can't shift ret.length up without
+ // overflowing.
+ err = StructuralError{"length too large", fieldName}
+ return
+ }
+ ret.length <<= 8
+ ret.length |= int(b)
+ if ret.length == 0 {
+ // DER requires that lengths be minimal.
+ err = StructuralError{"superfluous leading zeros in length", fieldName}
+ return
+ }
+ }
+ // Short lengths must be encoded in short form.
+ if ret.length < 0x80 {
+ err = StructuralError{"non-minimal length", fieldName}
+ return
+ }
+ }
+
+ return
+}
+
+// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
+// a number of ASN.1 values from the given byte slice and returns them as a
+// slice of Go values of the given type.
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, lax bool, fieldName string) (ret reflect.Value, err error) {
+ matchAny, expectedTag, compoundType, ok := getUniversalType(elemType)
+ if !ok {
+ err = StructuralError{"unknown Go type for slice", fieldName}
+ return
+ }
+
+ // First we iterate over the input and count the number of elements,
+ // checking that the types are correct in each case.
+ numElements := 0
+ for offset := 0; offset < len(bytes); {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset, fieldName)
+ if err != nil {
+ return
+ }
+ switch t.tag {
+ case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
+ // We pretend that various other string types are
+ // PRINTABLE STRINGs so that a sequence of them can be
+ // parsed into a []string.
+ t.tag = TagPrintableString
+ case TagGeneralizedTime, TagUTCTime:
+ // Likewise, both time types are treated the same.
+ t.tag = TagUTCTime
+ }
+
+ if !matchAny && (t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag) {
+ err = StructuralError{fmt.Sprintf("sequence tag mismatch (got:%+v, want:0/%d/%t)", t, expectedTag, compoundType), fieldName}
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"truncated sequence", fieldName}
+ return
+ }
+ offset += t.length
+ numElements++
+ }
+ ret = reflect.MakeSlice(sliceType, numElements, numElements)
+ params := fieldParameters{lax: lax}
+ offset := 0
+ for i := 0; i < numElements; i++ {
+ offset, err = parseField(ret.Index(i), bytes, offset, params)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+var (
+ bitStringType = reflect.TypeOf(BitString{})
+ objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
+ enumeratedType = reflect.TypeOf(Enumerated(0))
+ flagType = reflect.TypeOf(Flag(false))
+ timeType = reflect.TypeOf(time.Time{})
+ rawValueType = reflect.TypeOf(RawValue{})
+ rawContentsType = reflect.TypeOf(RawContent(nil))
+ bigIntType = reflect.TypeOf(new(big.Int))
+)
+
+// invalidLength reports whether offset + length > sliceLength, or if the
+// addition would overflow.
+func invalidLength(offset, length, sliceLength int) bool {
+ return offset+length < offset || offset+length > sliceLength
+}
+
+// couldBeISO8859_1 reports whether the data in |bytes| would be a valid
+// ISO8859-1 string. Clearly, a sequence of bytes composed solely of valid
+// ISO8859-1 codepoints does not imply that the encoding MUST be ISO8859-1,
+// rather that you would not encounter an error trying to interpret the data
+// as such.
+func couldBeISO8859_1(bytes []byte) bool {
+ for _, b := range bytes {
+ if b < 0x20 || (b >= 0x7F && b < 0xA0) {
+ return false
+ }
+ }
+ return true
+}
+
+// couldBeT61 reports whether the data in |bytes| would be a valid T.61
+// string. Clearly, a sequence of bytes composed solely of valid T.61
+// codepoints does not imply that the encoding MUST be T.61, rather that
+// you would not encounter an error trying to interpret the data as such.
+func couldBeT61(bytes []byte) bool {
+ for _, b := range bytes {
+ switch b {
+ case 0x00:
+ // Since we're guessing at (incorrect) encodings for a
+ // PrintableString, we'll err on the side of caution and disallow
+ // strings with a NUL in them; we don't want to re-create a PayPal
+ // NUL situation in monitors.
+ fallthrough
+ case 0x23, 0x24, 0x5C, 0x5E, 0x60, 0x7B, 0x7D, 0x7E, 0xA5, 0xA6, 0xAC, 0xAD, 0xAE, 0xAF,
+ 0xB9, 0xBA, 0xC0, 0xC9, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9,
+ 0xDA, 0xDB, 0xDC, 0xDE, 0xDF, 0xE5, 0xFF:
+ // These are all invalid code points in T.61, so it can't be a T.61 string.
+ return false
+ }
+ }
+ return true
+}
+
+// iso8859_1ToUTF8 converts the data in |bytes| to the equivalent UTF-8 string.
+func iso8859_1ToUTF8(bytes []byte) string {
+ buf := make([]rune, len(bytes))
+ for i, b := range bytes {
+ buf[i] = rune(b)
+ }
+ return string(buf)
+}
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// into the array, it will try to parse a suitable ASN.1 value out and store it
+// in the given Value.
+func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
+ offset = initOffset
+ fieldType := v.Type()
+
+ // If we have run out of data, it may be that there are optional elements at the end.
+ if offset == len(bytes) {
+ if !setDefaultValue(v, params) {
+ err = SyntaxError{"sequence truncated", params.name}
+ }
+ return
+ }
+
+ // Deal with the ANY type.
+ if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset, params.name)
+ if err != nil {
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated", params.name}
+ return
+ }
+ var result interface{}
+ if !t.isCompound && t.class == ClassUniversal {
+ innerBytes := bytes[offset : offset+t.length]
+ switch t.tag {
+ case TagPrintableString:
+ result, err = parsePrintableString(innerBytes, params.lax, params.name)
+ case TagNumericString:
+ result, err = parseNumericString(innerBytes, params.name)
+ case TagIA5String:
+ result, err = parseIA5String(innerBytes, params.name)
+ case TagT61String:
+ result, err = parseT61String(innerBytes)
+ case TagUTF8String:
+ result, err = parseUTF8String(innerBytes)
+ case TagInteger:
+ result, err = parseInt64(innerBytes, params.lax, params.name)
+ case TagBitString:
+ result, err = parseBitString(innerBytes, params.name)
+ case TagOID:
+ result, err = parseObjectIdentifier(innerBytes, params.lax, params.name)
+ case TagUTCTime:
+ result, err = parseUTCTime(innerBytes)
+ case TagGeneralizedTime:
+ result, err = parseGeneralizedTime(innerBytes)
+ case TagOctetString:
+ result = innerBytes
+ case TagBMPString:
+ result, err = parseBMPString(innerBytes)
+ default:
+ // If we don't know how to handle the type, we just leave Value as nil.
+ }
+ }
+ offset += t.length
+ if err != nil {
+ return
+ }
+ if result != nil {
+ v.Set(reflect.ValueOf(result))
+ }
+ return
+ }
+
+ t, offset, err := parseTagAndLength(bytes, offset, params.name)
+ if err != nil {
+ return
+ }
+ if params.explicit {
+ expectedClass := ClassContextSpecific
+ if params.application {
+ expectedClass = ClassApplication
+ }
+ if offset == len(bytes) {
+ err = StructuralError{"explicit tag has no child", params.name}
+ return
+ }
+ if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
+ if fieldType == rawValueType {
+ // The inner element should not be parsed for RawValues.
+ } else if t.length > 0 {
+ t, offset, err = parseTagAndLength(bytes, offset, params.name)
+ if err != nil {
+ return
+ }
+ } else {
+ if fieldType != flagType {
+ err = StructuralError{"zero length explicit tag was not an asn1.Flag", params.name}
+ return
+ }
+ v.SetBool(true)
+ return
+ }
+ } else {
+ // The tags didn't match, it might be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{"explicitly tagged member didn't match", params.name}
+ }
+ return
+ }
+ }
+
+ matchAny, universalTag, compoundType, ok1 := getUniversalType(fieldType)
+ if !ok1 {
+ err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType), params.name}
+ return
+ }
+
+ // Special case for strings: all the ASN.1 string types map to the Go
+ // type string. getUniversalType returns the tag for PrintableString
+ // when it sees a string, so if we see a different string type on the
+ // wire, we change the universal type to match.
+ if universalTag == TagPrintableString {
+ if t.class == ClassUniversal {
+ switch t.tag {
+ case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
+ universalTag = t.tag
+ }
+ } else if params.stringType != 0 {
+ universalTag = params.stringType
+ }
+ }
+
+ // Special case for time: UTCTime and GeneralizedTime both map to the
+ // Go type time.Time.
+ if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal {
+ universalTag = TagGeneralizedTime
+ }
+
+ if params.set {
+ universalTag = TagSet
+ }
+
+ matchAnyClassAndTag := matchAny
+ expectedClass := ClassUniversal
+ expectedTag := universalTag
+
+ if !params.explicit && params.tag != nil {
+ expectedClass = ClassContextSpecific
+ expectedTag = *params.tag
+ matchAnyClassAndTag = false
+ }
+
+ if !params.explicit && params.application && params.tag != nil {
+ expectedClass = ClassApplication
+ expectedTag = *params.tag
+ matchAnyClassAndTag = false
+ }
+
+ if !params.explicit && params.private && params.tag != nil {
+ expectedClass = ClassPrivate
+ expectedTag = *params.tag
+ matchAnyClassAndTag = false
+ }
+
+ // We have unwrapped any explicit tagging at this point.
+ if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) ||
+ (!matchAny && t.isCompound != compoundType) {
+ // Tags don't match. Again, it could be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset), params.name}
+ }
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated", params.name}
+ return
+ }
+ innerBytes := bytes[offset : offset+t.length]
+ offset += t.length
+
+ // We deal with the structures defined in this package first.
+ switch fieldType {
+ case rawValueType:
+ result := RawValue{t.class, t.tag, t.isCompound, innerBytes, bytes[initOffset:offset]}
+ v.Set(reflect.ValueOf(result))
+ return
+ case objectIdentifierType:
+ newSlice, err1 := parseObjectIdentifier(innerBytes, params.lax, params.name)
+ v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
+ if err1 == nil {
+ reflect.Copy(v, reflect.ValueOf(newSlice))
+ }
+ err = err1
+ return
+ case bitStringType:
+ bs, err1 := parseBitString(innerBytes, params.name)
+ if err1 == nil {
+ v.Set(reflect.ValueOf(bs))
+ }
+ err = err1
+ return
+ case timeType:
+ var time time.Time
+ var err1 error
+ if universalTag == TagUTCTime {
+ time, err1 = parseUTCTime(innerBytes)
+ } else {
+ time, err1 = parseGeneralizedTime(innerBytes)
+ }
+ if err1 == nil {
+ v.Set(reflect.ValueOf(time))
+ }
+ err = err1
+ return
+ case enumeratedType:
+ parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name)
+ if err1 == nil {
+ v.SetInt(int64(parsedInt))
+ }
+ err = err1
+ return
+ case flagType:
+ v.SetBool(true)
+ return
+ case bigIntType:
+ parsedInt, err1 := parseBigInt(innerBytes, params.lax, params.name)
+ if err1 == nil {
+ v.Set(reflect.ValueOf(parsedInt))
+ }
+ err = err1
+ return
+ }
+ switch val := v; val.Kind() {
+ case reflect.Bool:
+ parsedBool, err1 := parseBool(innerBytes, params.name)
+ if err1 == nil {
+ val.SetBool(parsedBool)
+ }
+ err = err1
+ return
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ if val.Type().Size() == 4 {
+ parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name)
+ if err1 == nil {
+ val.SetInt(int64(parsedInt))
+ }
+ err = err1
+ } else {
+ parsedInt, err1 := parseInt64(innerBytes, params.lax, params.name)
+ if err1 == nil {
+ val.SetInt(parsedInt)
+ }
+ err = err1
+ }
+ return
+ // TODO(dfc) Add support for the remaining integer types
+ case reflect.Struct:
+ structType := fieldType
+
+ for i := 0; i < structType.NumField(); i++ {
+ if structType.Field(i).PkgPath != "" {
+ err = StructuralError{"struct contains unexported fields", structType.Field(i).Name}
+ return
+ }
+ }
+
+ if structType.NumField() > 0 &&
+ structType.Field(0).Type == rawContentsType {
+ bytes := bytes[initOffset:offset]
+ val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
+ }
+
+ innerOffset := 0
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ if i == 0 && field.Type == rawContentsType {
+ continue
+ }
+ innerParams := parseFieldParameters(field.Tag.Get("asn1"))
+ innerParams.name = field.Name
+ innerParams.lax = params.lax
+ innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, innerParams)
+ if err != nil {
+ return
+ }
+ }
+ // We allow extra bytes at the end of the SEQUENCE because
+ // adding elements to the end has been used in X.509 as the
+ // version numbers have increased.
+ return
+ case reflect.Slice:
+ sliceType := fieldType
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
+ reflect.Copy(val, reflect.ValueOf(innerBytes))
+ return
+ }
+ newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.lax, params.name)
+ if err1 == nil {
+ val.Set(newSlice)
+ }
+ err = err1
+ return
+ case reflect.String:
+ var v string
+ switch universalTag {
+ case TagPrintableString:
+ v, err = parsePrintableString(innerBytes, params.lax, params.name)
+ case TagNumericString:
+ v, err = parseNumericString(innerBytes, params.name)
+ case TagIA5String:
+ v, err = parseIA5String(innerBytes, params.name)
+ case TagT61String:
+ v, err = parseT61String(innerBytes)
+ case TagUTF8String:
+ v, err = parseUTF8String(innerBytes)
+ case TagGeneralString:
+ // GeneralString is specified in ISO-2022/ECMA-35.
+ // A brief review suggests that it includes structures
+ // that allow the encoding to change midstring and
+ // such. We give up and pass it as an 8-bit string.
+ v, err = parseT61String(innerBytes)
+ case TagBMPString:
+ v, err = parseBMPString(innerBytes)
+
+ default:
+ err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag), params.name}
+ }
+ if err == nil {
+ val.SetString(v)
+ }
+ return
+ }
+ err = StructuralError{"unsupported: " + v.Type().String(), params.name}
+ return
+}
+
+// canHaveDefaultValue reports whether k is a Kind that we will set a default
+// value for. (A signed integer, essentially.)
+func canHaveDefaultValue(k reflect.Kind) bool {
+ switch k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return true
+ }
+
+ return false
+}
+
+// setDefaultValue is used to install a default value, from a tag string, into
+// a Value. It is successful if the field was optional, even if a default value
+// wasn't provided or it failed to install it into the Value.
+func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
+ if !params.optional {
+ return
+ }
+ ok = true
+ if params.defaultValue == nil {
+ return
+ }
+ if canHaveDefaultValue(v.Kind()) {
+ v.SetInt(*params.defaultValue)
+ }
+ return
+}
+
+// Unmarshal parses the DER-encoded ASN.1 data structure b
+// and uses the reflect package to fill in an arbitrary value pointed at by val.
+// Because Unmarshal uses the reflect package, the structs
+// being written to must use upper case field names.
+//
+// An ASN.1 INTEGER can be written to an int, int32, int64,
+// or *big.Int (from the math/big package).
+// If the encoded value does not fit in the Go type,
+// Unmarshal returns a parse error.
+//
+// An ASN.1 BIT STRING can be written to a BitString.
+//
+// An ASN.1 OCTET STRING can be written to a []byte.
+//
+// An ASN.1 OBJECT IDENTIFIER can be written to an
+// ObjectIdentifier.
+//
+// An ASN.1 ENUMERATED can be written to an Enumerated.
+//
+// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
+//
+// An ASN.1 PrintableString, IA5String, or NumericString can be written to a string.
+//
+// Any of the above ASN.1 values can be written to an interface{}.
+// The value stored in the interface has the corresponding Go type.
+// For integers, that type is int64.
+//
+// An ASN.1 SEQUENCE OF x or SET OF x can be written
+// to a slice if an x can be written to the slice's element type.
+//
+// An ASN.1 SEQUENCE or SET can be written to a struct
+// if each of the elements in the sequence can be
+// written to the corresponding element in the struct.
+//
+// The following tags on struct fields have special meaning to Unmarshal:
+//
+// application specifies that an APPLICATION tag is used
+// private specifies that a PRIVATE tag is used
+// default:x sets the default value for optional integer fields (only used if optional is also present)
+// explicit specifies that an additional, explicit tag wraps the implicit one
+// optional marks the field as ASN.1 OPTIONAL
+// set causes a SET, rather than a SEQUENCE type to be expected
+// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+// lax relax strict encoding checks for this field, and for any fields within it
+//
+// If the type of the first field of a structure is RawContent then the raw
+// ASN1 contents of the struct will be stored in it.
+//
+// If the type name of a slice element ends with "SET" then it's treated as if
+// the "set" tag was set on it. This can be used with nested slices where a
+// struct tag cannot be given.
+//
+// Other ASN.1 types are not supported; if it encounters them,
+// Unmarshal returns a parse error.
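+//
+// For example (an illustrative sketch; the struct type and DER bytes below
+// are not part of this package):
+//
+// type point struct {
+// X, Y int
+// }
+// // DER encoding of SEQUENCE { INTEGER 1, INTEGER 2 }.
+// der := []byte{0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02}
+// var p point
+// rest, err := Unmarshal(der, &p) // on success, p.X == 1 and p.Y == 2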
+func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
+ return UnmarshalWithParams(b, val, "")
+}
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
+ v := reflect.ValueOf(val).Elem()
+ offset, err := parseField(v, b, 0, parseFieldParameters(params))
+ if err != nil {
+ return nil, err
+ }
+ return b[offset:], nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/common.go b/vendor/github.com/google/certificate-transparency-go/asn1/common.go
new file mode 100644
index 00000000000..982d06c09ed
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/asn1/common.go
@@ -0,0 +1,187 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// ASN.1 objects have metadata preceding them:
+// the tag: the type of the object
+// a flag denoting if this object is compound or not
+// the class type: the namespace of the tag
+// the length of the object, in bytes
+
+// Here are some standard tags and classes
+
+// ASN.1 tags represent the type of the following object.
+const (
+ TagBoolean = 1
+ TagInteger = 2
+ TagBitString = 3
+ TagOctetString = 4
+ TagNull = 5
+ TagOID = 6
+ TagEnum = 10
+ TagUTF8String = 12
+ TagSequence = 16
+ TagSet = 17
+ TagNumericString = 18
+ TagPrintableString = 19
+ TagT61String = 20
+ TagIA5String = 22
+ TagUTCTime = 23
+ TagGeneralizedTime = 24
+ TagGeneralString = 27
+ TagBMPString = 30
+)
+
+// ASN.1 class types represent the namespace of the tag.
+const (
+ ClassUniversal = 0
+ ClassApplication = 1
+ ClassContextSpecific = 2
+ ClassPrivate = 3
+)
+
+type tagAndLength struct {
+ class, tag, length int
+ isCompound bool
+}
+
+// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
+// of" and "in addition to". When not specified, every primitive type has a
+// default tag in the UNIVERSAL class.
+//
+// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
+// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT
+// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another.
+//
+// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
+// /additional/ tag would wrap the default tag. This explicit tag will have the
+// compound flag set.
+//
+// (This is used in order to remove ambiguity with optional elements.)
+//
+// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth; however, we
+// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
+// tagging with tag strings on the fields of a structure.
+
+// fieldParameters is the parsed representation of tag string from a structure field.
+type fieldParameters struct {
+ optional bool // true iff the field is OPTIONAL
+ explicit bool // true iff an EXPLICIT tag is in use.
+ application bool // true iff an APPLICATION tag is in use.
+ private bool // true iff a PRIVATE tag is in use.
+ defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
+ tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
+ stringType int // the string tag to use when marshaling.
+ timeType int // the time tag to use when marshaling.
+ set bool // true iff this should be encoded as a SET
+ omitEmpty bool // true iff this should be omitted if empty when marshaling.
+ lax bool // true iff unmarshalling should skip some error checks
+ name string // name of field for better diagnostics
+
+ // Invariants:
+ // if explicit is set, tag is non-nil.
+}
+
+// Given a tag string with the format specified in the package comment,
+// parseFieldParameters will parse it into a fieldParameters structure,
+// ignoring unknown parts of the string.
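+// For example, the tag string "optional,explicit,tag:0" sets optional and
+// explicit, and points tag at 0.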
+func parseFieldParameters(str string) (ret fieldParameters) {
+ for _, part := range strings.Split(str, ",") {
+ switch {
+ case part == "optional":
+ ret.optional = true
+ case part == "explicit":
+ ret.explicit = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
+ case part == "generalized":
+ ret.timeType = TagGeneralizedTime
+ case part == "utc":
+ ret.timeType = TagUTCTime
+ case part == "ia5":
+ ret.stringType = TagIA5String
+ case part == "printable":
+ ret.stringType = TagPrintableString
+ case part == "numeric":
+ ret.stringType = TagNumericString
+ case part == "utf8":
+ ret.stringType = TagUTF8String
+ case strings.HasPrefix(part, "default:"):
+ i, err := strconv.ParseInt(part[8:], 10, 64)
+ if err == nil {
+ ret.defaultValue = new(int64)
+ *ret.defaultValue = i
+ }
+ case strings.HasPrefix(part, "tag:"):
+ i, err := strconv.Atoi(part[4:])
+ if err == nil {
+ ret.tag = new(int)
+ *ret.tag = i
+ }
+ case part == "set":
+ ret.set = true
+ case part == "application":
+ ret.application = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
+ case part == "private":
+ ret.private = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
+ case part == "omitempty":
+ ret.omitEmpty = true
+ case part == "lax":
+ ret.lax = true
+ }
+ }
+ return
+}
+
+// Given a reflected Go type, getUniversalType returns the default tag number
+// and expected compound flag.
+func getUniversalType(t reflect.Type) (matchAny bool, tagNumber int, isCompound, ok bool) {
+ switch t {
+ case rawValueType:
+ return true, -1, false, true
+ case objectIdentifierType:
+ return false, TagOID, false, true
+ case bitStringType:
+ return false, TagBitString, false, true
+ case timeType:
+ return false, TagUTCTime, false, true
+ case enumeratedType:
+ return false, TagEnum, false, true
+ case bigIntType:
+ return false, TagInteger, false, true
+ }
+ switch t.Kind() {
+ case reflect.Bool:
+ return false, TagBoolean, false, true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return false, TagInteger, false, true
+ case reflect.Struct:
+ return false, TagSequence, true, true
+ case reflect.Slice:
+ if t.Elem().Kind() == reflect.Uint8 {
+ return false, TagOctetString, false, true
+ }
+ if strings.HasSuffix(t.Name(), "SET") {
+ return false, TagSet, true, true
+ }
+ return false, TagSequence, true, true
+ case reflect.String:
+ return false, TagPrintableString, false, true
+ }
+ return false, 0, false, false
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go b/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go
new file mode 100644
index 00000000000..9801b065a18
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go
@@ -0,0 +1,691 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+ "reflect"
+ "time"
+ "unicode/utf8"
+)
+
+var (
+ byte00Encoder encoder = byteEncoder(0x00)
+ byteFFEncoder encoder = byteEncoder(0xff)
+)
+
+// encoder represents an ASN.1 element that is waiting to be marshaled.
+type encoder interface {
+ // Len returns the number of bytes needed to marshal this element.
+ Len() int
+ // Encode encodes this element by writing Len() bytes to dst.
+ Encode(dst []byte)
+}
+
+type byteEncoder byte
+
+func (c byteEncoder) Len() int {
+ return 1
+}
+
+func (c byteEncoder) Encode(dst []byte) {
+ dst[0] = byte(c)
+}
+
+type bytesEncoder []byte
+
+func (b bytesEncoder) Len() int {
+ return len(b)
+}
+
+func (b bytesEncoder) Encode(dst []byte) {
+ if copy(dst, b) != len(b) {
+ panic("internal error")
+ }
+}
+
+type stringEncoder string
+
+func (s stringEncoder) Len() int {
+ return len(s)
+}
+
+func (s stringEncoder) Encode(dst []byte) {
+ if copy(dst, s) != len(s) {
+ panic("internal error")
+ }
+}
+
+type multiEncoder []encoder
+
+func (m multiEncoder) Len() int {
+ var size int
+ for _, e := range m {
+ size += e.Len()
+ }
+ return size
+}
+
+func (m multiEncoder) Encode(dst []byte) {
+ var off int
+ for _, e := range m {
+ e.Encode(dst[off:])
+ off += e.Len()
+ }
+}
+
+type taggedEncoder struct {
+ // scratch contains temporary space for encoding the tag and length of
+ // an element in order to avoid extra allocations.
+ scratch [8]byte
+ tag encoder
+ body encoder
+}
+
+func (t *taggedEncoder) Len() int {
+ return t.tag.Len() + t.body.Len()
+}
+
+func (t *taggedEncoder) Encode(dst []byte) {
+ t.tag.Encode(dst)
+ t.body.Encode(dst[t.tag.Len():])
+}
+
+type int64Encoder int64
+
+func (i int64Encoder) Len() int {
+ n := 1
+
+ for i > 127 {
+ n++
+ i >>= 8
+ }
+
+ for i < -128 {
+ n++
+ i >>= 8
+ }
+
+ return n
+}
+
+func (i int64Encoder) Encode(dst []byte) {
+ n := i.Len()
+
+ for j := 0; j < n; j++ {
+ dst[j] = byte(i >> uint((n-1-j)*8))
+ }
+}
+
+func base128IntLength(n int64) int {
+ if n == 0 {
+ return 1
+ }
+
+ l := 0
+ for i := n; i > 0; i >>= 7 {
+ l++
+ }
+
+ return l
+}
+
+func appendBase128Int(dst []byte, n int64) []byte {
+ l := base128IntLength(n)
+
+ for i := l - 1; i >= 0; i-- {
+ o := byte(n >> uint(i*7))
+ o &= 0x7f
+ if i != 0 {
+ o |= 0x80
+ }
+
+ dst = append(dst, o)
+ }
+
+ return dst
+}
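+
+// For example, appendBase128Int(nil, 268) produces the two bytes 0x82 0x0c:
+// 268 = 2*128 + 12, and every byte except the last sets the 0x80
+// continuation bit.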
+
+func makeBigInt(n *big.Int, fieldName string) (encoder, error) {
+ if n == nil {
+ return nil, StructuralError{"empty integer", fieldName}
+ }
+
+ if n.Sign() < 0 {
+ // A negative number has to be converted to two's-complement
+ // form. So we'll invert and subtract 1. If the
+ // most-significant-bit isn't set then we'll need to pad the
+ // beginning with 0xff in order to keep the number negative.
+ nMinus1 := new(big.Int).Neg(n)
+ nMinus1.Sub(nMinus1, bigOne)
+ bytes := nMinus1.Bytes()
+ for i := range bytes {
+ bytes[i] ^= 0xff
+ }
+ if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+ return multiEncoder([]encoder{byteFFEncoder, bytesEncoder(bytes)}), nil
+ }
+ return bytesEncoder(bytes), nil
+ } else if n.Sign() == 0 {
+ // Zero is written as a single zero byte rather than no bytes.
+ return byte00Encoder, nil
+ } else {
+ bytes := n.Bytes()
+ if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+ // We'll have to pad this with 0x00 in order to stop it
+ // looking like a negative number.
+ return multiEncoder([]encoder{byte00Encoder, bytesEncoder(bytes)}), nil
+ }
+ return bytesEncoder(bytes), nil
+ }
+}
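+
+// Worked examples: makeBigInt on big.NewInt(-129) yields 0xff 0x7f (invert
+// 129-1 = 0x80 to get 0x7f, then pad with 0xff to keep the sign bit set),
+// while big.NewInt(128) yields 0x00 0x80 (padded with 0x00 so the value is
+// not read back as negative).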
+
+func appendLength(dst []byte, i int) []byte {
+ n := lengthLength(i)
+
+ for ; n > 0; n-- {
+ dst = append(dst, byte(i>>uint((n-1)*8)))
+ }
+
+ return dst
+}
+
+func lengthLength(i int) (numBytes int) {
+ numBytes = 1
+ for i > 255 {
+ numBytes++
+ i >>= 8
+ }
+ return
+}
+
+func appendTagAndLength(dst []byte, t tagAndLength) []byte {
+ b := uint8(t.class) << 6
+ if t.isCompound {
+ b |= 0x20
+ }
+ if t.tag >= 31 {
+ b |= 0x1f
+ dst = append(dst, b)
+ dst = appendBase128Int(dst, int64(t.tag))
+ } else {
+ b |= uint8(t.tag)
+ dst = append(dst, b)
+ }
+
+ if t.length >= 128 {
+ l := lengthLength(t.length)
+ dst = append(dst, 0x80|byte(l))
+ dst = appendLength(dst, t.length)
+ } else {
+ dst = append(dst, byte(t.length))
+ }
+
+ return dst
+}
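+
+// For example, a universal SEQUENCE (tag 16, compound) of length 5 gets the
+// prefix 0x30 0x05, while one of length 300 uses the long form
+// 0x30 0x82 0x01 0x2c (0x82 announces two length bytes).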
+
+type bitStringEncoder BitString
+
+func (b bitStringEncoder) Len() int {
+ return len(b.Bytes) + 1
+}
+
+func (b bitStringEncoder) Encode(dst []byte) {
+ dst[0] = byte((8 - b.BitLength%8) % 8)
+ if copy(dst[1:], b.Bytes) != len(b.Bytes) {
+ panic("internal error")
+ }
+}
+
+type oidEncoder []int
+
+func (oid oidEncoder) Len() int {
+ l := base128IntLength(int64(oid[0]*40 + oid[1]))
+ for i := 2; i < len(oid); i++ {
+ l += base128IntLength(int64(oid[i]))
+ }
+ return l
+}
+
+func (oid oidEncoder) Encode(dst []byte) {
+ dst = appendBase128Int(dst[:0], int64(oid[0]*40+oid[1]))
+ for i := 2; i < len(oid); i++ {
+ dst = appendBase128Int(dst, int64(oid[i]))
+ }
+}
+
+func makeObjectIdentifier(oid []int, fieldName string) (e encoder, err error) {
+ if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
+ return nil, StructuralError{"invalid object identifier", fieldName}
+ }
+
+ return oidEncoder(oid), nil
+}
+
+func makePrintableString(s, fieldName string) (e encoder, err error) {
+ for i := 0; i < len(s); i++ {
+ // The asterisk is often used in PrintableString, even though
+ // it is invalid. If a PrintableString was specifically
+ // requested then the asterisk is permitted by this code.
+ // Ampersand is allowed when parsing due to a handful of CA
+ // certificates; however, it is rejected when making new
+ // certificates.
+ if !isPrintable(s[i], allowAsterisk, rejectAmpersand) {
+ return nil, StructuralError{"PrintableString contains invalid character", fieldName}
+ }
+ }
+
+ return stringEncoder(s), nil
+}
+
+func makeIA5String(s, fieldName string) (e encoder, err error) {
+ for i := 0; i < len(s); i++ {
+ if s[i] > 127 {
+ return nil, StructuralError{"IA5String contains invalid character", fieldName}
+ }
+ }
+
+ return stringEncoder(s), nil
+}
+
+func makeNumericString(s string, fieldName string) (e encoder, err error) {
+ for i := 0; i < len(s); i++ {
+ if !isNumeric(s[i]) {
+ return nil, StructuralError{"NumericString contains invalid character", fieldName}
+ }
+ }
+
+ return stringEncoder(s), nil
+}
+
+func makeUTF8String(s string) encoder {
+ return stringEncoder(s)
+}
+
+func appendTwoDigits(dst []byte, v int) []byte {
+ return append(dst, byte('0'+(v/10)%10), byte('0'+v%10))
+}
+
+func appendFourDigits(dst []byte, v int) []byte {
+ var bytes [4]byte
+ for i := range bytes {
+ bytes[3-i] = '0' + byte(v%10)
+ v /= 10
+ }
+ return append(dst, bytes[:]...)
+}
+
+func outsideUTCRange(t time.Time) bool {
+ year := t.Year()
+ return year < 1950 || year >= 2050
+}
+
+func makeUTCTime(t time.Time, fieldName string) (e encoder, err error) {
+ dst := make([]byte, 0, 18)
+
+ dst, err = appendUTCTime(dst, t, fieldName)
+ if err != nil {
+ return nil, err
+ }
+
+ return bytesEncoder(dst), nil
+}
+
+func makeGeneralizedTime(t time.Time, fieldName string) (e encoder, err error) {
+ dst := make([]byte, 0, 20)
+
+ dst, err = appendGeneralizedTime(dst, t, fieldName)
+ if err != nil {
+ return nil, err
+ }
+
+ return bytesEncoder(dst), nil
+}
+
+func appendUTCTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) {
+ year := t.Year()
+
+ switch {
+ case 1950 <= year && year < 2000:
+ dst = appendTwoDigits(dst, year-1900)
+ case 2000 <= year && year < 2050:
+ dst = appendTwoDigits(dst, year-2000)
+ default:
+ return nil, StructuralError{"cannot represent time as UTCTime", fieldName}
+ }
+
+ return appendTimeCommon(dst, t), nil
+}
+
+func appendGeneralizedTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) {
+ year := t.Year()
+ if year < 0 || year > 9999 {
+ return nil, StructuralError{"cannot represent time as GeneralizedTime", fieldName}
+ }
+
+ dst = appendFourDigits(dst, year)
+
+ return appendTimeCommon(dst, t), nil
+}
+
+func appendTimeCommon(dst []byte, t time.Time) []byte {
+ _, month, day := t.Date()
+
+ dst = appendTwoDigits(dst, int(month))
+ dst = appendTwoDigits(dst, day)
+
+ hour, min, sec := t.Clock()
+
+ dst = appendTwoDigits(dst, hour)
+ dst = appendTwoDigits(dst, min)
+ dst = appendTwoDigits(dst, sec)
+
+ _, offset := t.Zone()
+
+ switch {
+ case offset/60 == 0:
+ return append(dst, 'Z')
+ case offset > 0:
+ dst = append(dst, '+')
+ case offset < 0:
+ dst = append(dst, '-')
+ }
+
+ offsetMinutes := offset / 60
+ if offsetMinutes < 0 {
+ offsetMinutes = -offsetMinutes
+ }
+
+ dst = appendTwoDigits(dst, offsetMinutes/60)
+ dst = appendTwoDigits(dst, offsetMinutes%60)
+
+ return dst
+}
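+
+// For example (hypothetical value), 2025-03-01 12:30:45 UTC renders as the
+// UTCTime "250301123045Z" and as the GeneralizedTime "20250301123045Z";
+// in a +01:00 zone the suffix would be "+0100" instead of 'Z'.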
+
+func stripTagAndLength(in []byte) []byte {
+ _, offset, err := parseTagAndLength(in, 0, "")
+ if err != nil {
+ return in
+ }
+ return in[offset:]
+}
+
+func makeBody(value reflect.Value, params fieldParameters) (e encoder, err error) {
+ switch value.Type() {
+ case flagType:
+ return bytesEncoder(nil), nil
+ case timeType:
+ t := value.Interface().(time.Time)
+ if params.timeType == TagGeneralizedTime || outsideUTCRange(t) {
+ return makeGeneralizedTime(t, params.name)
+ }
+ return makeUTCTime(t, params.name)
+ case bitStringType:
+ return bitStringEncoder(value.Interface().(BitString)), nil
+ case objectIdentifierType:
+ return makeObjectIdentifier(value.Interface().(ObjectIdentifier), params.name)
+ case bigIntType:
+ return makeBigInt(value.Interface().(*big.Int), params.name)
+ }
+
+ switch v := value; v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ return byteFFEncoder, nil
+ }
+ return byte00Encoder, nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return int64Encoder(v.Int()), nil
+ case reflect.Struct:
+ t := v.Type()
+
+ for i := 0; i < t.NumField(); i++ {
+ if t.Field(i).PkgPath != "" {
+ return nil, StructuralError{"struct contains unexported fields", t.Field(i).Name}
+ }
+ }
+
+ startingField := 0
+
+ n := t.NumField()
+ if n == 0 {
+ return bytesEncoder(nil), nil
+ }
+
+ // If the first element of the structure is a non-empty
+ // RawContents, then we don't bother serializing the rest.
+ if t.Field(0).Type == rawContentsType {
+ s := v.Field(0)
+ if s.Len() > 0 {
+ bytes := s.Bytes()
+ /* The RawContents will contain the tag and
+ * length fields but we'll also be writing
+ * those ourselves, so we strip them out of
+ * bytes */
+ return bytesEncoder(stripTagAndLength(bytes)), nil
+ }
+
+ startingField = 1
+ }
+
+ switch n1 := n - startingField; n1 {
+ case 0:
+ return bytesEncoder(nil), nil
+ case 1:
+ return makeField(v.Field(startingField), parseFieldParameters(t.Field(startingField).Tag.Get("asn1")))
+ default:
+ m := make([]encoder, n1)
+ for i := 0; i < n1; i++ {
+ m[i], err = makeField(v.Field(i+startingField), parseFieldParameters(t.Field(i+startingField).Tag.Get("asn1")))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return multiEncoder(m), nil
+ }
+ case reflect.Slice:
+ sliceType := v.Type()
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ return bytesEncoder(v.Bytes()), nil
+ }
+
+ var fp fieldParameters
+
+ switch l := v.Len(); l {
+ case 0:
+ return bytesEncoder(nil), nil
+ case 1:
+ return makeField(v.Index(0), fp)
+ default:
+ m := make([]encoder, l)
+
+ for i := 0; i < l; i++ {
+ m[i], err = makeField(v.Index(i), fp)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return multiEncoder(m), nil
+ }
+ case reflect.String:
+ switch params.stringType {
+ case TagIA5String:
+ return makeIA5String(v.String(), params.name)
+ case TagPrintableString:
+ return makePrintableString(v.String(), params.name)
+ case TagNumericString:
+ return makeNumericString(v.String(), params.name)
+ default:
+ return makeUTF8String(v.String()), nil
+ }
+ }
+
+ return nil, StructuralError{"unknown Go type", params.name}
+}
+
+func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) {
+ if !v.IsValid() {
+ return nil, fmt.Errorf("asn1: cannot marshal nil value")
+ }
+ // If the field is an interface{} then recurse into it.
+ if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
+ return makeField(v.Elem(), params)
+ }
+
+ if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
+ return bytesEncoder(nil), nil
+ }
+
+ if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) {
+ defaultValue := reflect.New(v.Type()).Elem()
+ defaultValue.SetInt(*params.defaultValue)
+
+ if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) {
+ return bytesEncoder(nil), nil
+ }
+ }
+
+ // If no default value is given then the zero value for the type is
+ // assumed to be the default value. This isn't obviously the correct
+ // behavior, but it's what Go has traditionally done.
+ if params.optional && params.defaultValue == nil {
+ if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
+ return bytesEncoder(nil), nil
+ }
+ }
+
+ if v.Type() == rawValueType {
+ rv := v.Interface().(RawValue)
+ if len(rv.FullBytes) != 0 {
+ return bytesEncoder(rv.FullBytes), nil
+ }
+
+ t := new(taggedEncoder)
+
+ t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}))
+ t.body = bytesEncoder(rv.Bytes)
+
+ return t, nil
+ }
+
+ matchAny, tag, isCompound, ok := getUniversalType(v.Type())
+ if !ok || matchAny {
+ return nil, StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type()), params.name}
+ }
+
+ if params.timeType != 0 && tag != TagUTCTime {
+ return nil, StructuralError{"explicit time type given to non-time member", params.name}
+ }
+
+ if params.stringType != 0 && tag != TagPrintableString {
+ return nil, StructuralError{"explicit string type given to non-string member", params.name}
+ }
+
+ switch tag {
+ case TagPrintableString:
+ if params.stringType == 0 {
+ // This is a string without an explicit string type. We'll use
+ // a PrintableString if the character set in the string is
+ // sufficiently limited, otherwise we'll use a UTF8String.
+ for _, r := range v.String() {
+ if r >= utf8.RuneSelf || !isPrintable(byte(r), rejectAsterisk, rejectAmpersand) {
+ if !utf8.ValidString(v.String()) {
+ return nil, errors.New("asn1: string not valid UTF-8")
+ }
+ tag = TagUTF8String
+ break
+ }
+ }
+ } else {
+ tag = params.stringType
+ }
+ case TagUTCTime:
+ if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) {
+ tag = TagGeneralizedTime
+ }
+ }
+
+ if params.set {
+ if tag != TagSequence {
+ return nil, StructuralError{"non sequence tagged as set", params.name}
+ }
+ tag = TagSet
+ }
+
+ t := new(taggedEncoder)
+
+ t.body, err = makeBody(v, params)
+ if err != nil {
+ return nil, err
+ }
+
+ bodyLen := t.body.Len()
+
+ class := ClassUniversal
+ if params.tag != nil {
+ if params.application {
+ class = ClassApplication
+ } else if params.private {
+ class = ClassPrivate
+ } else {
+ class = ClassContextSpecific
+ }
+
+ if params.explicit {
+ t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{ClassUniversal, tag, bodyLen, isCompound}))
+
+ tt := new(taggedEncoder)
+
+ tt.body = t
+
+ tt.tag = bytesEncoder(appendTagAndLength(tt.scratch[:0], tagAndLength{
+ class: class,
+ tag: *params.tag,
+ length: bodyLen + t.tag.Len(),
+ isCompound: true,
+ }))
+
+ return tt, nil
+ }
+
+ // implicit tag.
+ tag = *params.tag
+ }
+
+ t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{class, tag, bodyLen, isCompound}))
+
+ return t, nil
+}
+
+// Marshal returns the ASN.1 encoding of val.
+//
+// In addition to the struct tags recognised by Unmarshal, the following can be
+// used:
+//
+// ia5: causes strings to be marshaled as ASN.1, IA5String values
+// omitempty: causes empty slices to be skipped
+// printable: causes strings to be marshaled as ASN.1, PrintableString values
+// utf8: causes strings to be marshaled as ASN.1, UTF8String values
+// utc: causes time.Time to be marshaled as ASN.1, UTCTime values
+// generalized: causes time.Time to be marshaled as ASN.1, GeneralizedTime values
+func Marshal(val interface{}) ([]byte, error) {
+ return MarshalWithParams(val, "")
+}
+
+// MarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func MarshalWithParams(val interface{}, params string) ([]byte, error) {
+ e, err := makeField(reflect.ValueOf(val), parseFieldParameters(params))
+ if err != nil {
+ return nil, err
+ }
+ b := make([]byte, e.Len())
+ e.Encode(b)
+ return b, nil
+}
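+
+// Usage sketch (Point is a hypothetical type):
+//
+//	type Point struct{ X, Y int }
+//	der, err := Marshal(Point{1, 2})
+//	// on success, der is 30 06 02 01 01 02 01 02: a SEQUENCE
+//	// containing INTEGER 1 and INTEGER 2.
+//
+// MarshalWithParams(Point{1, 2}, "explicit,tag:0") would wrap the same
+// SEQUENCE in a context-specific [0] EXPLICIT tag instead.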
diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go
new file mode 100644
index 00000000000..a2fed51d883
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go
@@ -0,0 +1,278 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.34.1
+// protoc v3.20.1
+// source: client/configpb/multilog.proto
+
+package configpb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// TemporalLogConfig is a set of LogShardConfig messages, whose
+// time limits should be contiguous.
+type TemporalLogConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"`
+}
+
+func (x *TemporalLogConfig) Reset() {
+ *x = TemporalLogConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_client_configpb_multilog_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TemporalLogConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TemporalLogConfig) ProtoMessage() {}
+
+func (x *TemporalLogConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_client_configpb_multilog_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TemporalLogConfig.ProtoReflect.Descriptor instead.
+func (*TemporalLogConfig) Descriptor() ([]byte, []int) {
+ return file_client_configpb_multilog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TemporalLogConfig) GetShard() []*LogShardConfig {
+ if x != nil {
+ return x.Shard
+ }
+ return nil
+}
+
+// LogShardConfig describes the acceptable date range for a single shard of a temporal
+// log.
+type LogShardConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
+ // The log's public key in DER-encoded PKIX form.
+ PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"`
+ // not_after_start defines the start of the range of acceptable NotAfter
+ // values, inclusive.
+ // Leaving this unset implies no lower bound to the range.
+ NotAfterStart *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"`
+ // not_after_limit defines the end of the range of acceptable NotAfter values,
+ // exclusive.
+ // Leaving this unset implies no upper bound to the range.
+ NotAfterLimit *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"`
+}
+
+func (x *LogShardConfig) Reset() {
+ *x = LogShardConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_client_configpb_multilog_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LogShardConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogShardConfig) ProtoMessage() {}
+
+func (x *LogShardConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_client_configpb_multilog_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogShardConfig.ProtoReflect.Descriptor instead.
+func (*LogShardConfig) Descriptor() ([]byte, []int) {
+ return file_client_configpb_multilog_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *LogShardConfig) GetUri() string {
+ if x != nil {
+ return x.Uri
+ }
+ return ""
+}
+
+func (x *LogShardConfig) GetPublicKeyDer() []byte {
+ if x != nil {
+ return x.PublicKeyDer
+ }
+ return nil
+}
+
+func (x *LogShardConfig) GetNotAfterStart() *timestamppb.Timestamp {
+ if x != nil {
+ return x.NotAfterStart
+ }
+ return nil
+}
+
+func (x *LogShardConfig) GetNotAfterLimit() *timestamppb.Timestamp {
+ if x != nil {
+ return x.NotAfterLimit
+ }
+ return nil
+}
+
+var File_client_configpb_multilog_proto protoreflect.FileDescriptor
+
+var file_client_configpb_multilog_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70,
+ 0x62, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x11, 0x54,
+ 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x22, 0xd0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f,
+ 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70,
+ 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12,
+ 0x42, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x4c, 0x69,
+ 0x6d, 0x69, 0x74, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x2d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63,
+ 0x79, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x6c, 0x6f, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x70, 0x62, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_client_configpb_multilog_proto_rawDescOnce sync.Once
+ file_client_configpb_multilog_proto_rawDescData = file_client_configpb_multilog_proto_rawDesc
+)
+
+func file_client_configpb_multilog_proto_rawDescGZIP() []byte {
+ file_client_configpb_multilog_proto_rawDescOnce.Do(func() {
+ file_client_configpb_multilog_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_configpb_multilog_proto_rawDescData)
+ })
+ return file_client_configpb_multilog_proto_rawDescData
+}
+
+var file_client_configpb_multilog_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_client_configpb_multilog_proto_goTypes = []interface{}{
+ (*TemporalLogConfig)(nil), // 0: configpb.TemporalLogConfig
+ (*LogShardConfig)(nil), // 1: configpb.LogShardConfig
+ (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
+}
+var file_client_configpb_multilog_proto_depIdxs = []int32{
+ 1, // 0: configpb.TemporalLogConfig.shard:type_name -> configpb.LogShardConfig
+ 2, // 1: configpb.LogShardConfig.not_after_start:type_name -> google.protobuf.Timestamp
+ 2, // 2: configpb.LogShardConfig.not_after_limit:type_name -> google.protobuf.Timestamp
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_client_configpb_multilog_proto_init() }
+func file_client_configpb_multilog_proto_init() {
+ if File_client_configpb_multilog_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_client_configpb_multilog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TemporalLogConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_client_configpb_multilog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LogShardConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_client_configpb_multilog_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_client_configpb_multilog_proto_goTypes,
+ DependencyIndexes: file_client_configpb_multilog_proto_depIdxs,
+ MessageInfos: file_client_configpb_multilog_proto_msgTypes,
+ }.Build()
+ File_client_configpb_multilog_proto = out.File
+ file_client_configpb_multilog_proto_rawDesc = nil
+ file_client_configpb_multilog_proto_goTypes = nil
+ file_client_configpb_multilog_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto
new file mode 100644
index 00000000000..0774c35e210
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto
@@ -0,0 +1,45 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package configpb;
+
+option go_package = "github.com/google/certificate-transparency-go/client/multilog/configpb";
+
+import "google/protobuf/timestamp.proto";
+
+// TemporalLogConfig is a set of LogShardConfig messages, whose
+// time limits should be contiguous.
+message TemporalLogConfig {
+ repeated LogShardConfig shard = 1;
+}
+
+// LogShardConfig describes the acceptable date range for a single shard of a temporal
+// log.
+message LogShardConfig {
+ string uri = 1;
+
+ // The log's public key in DER-encoded PKIX form.
+ bytes public_key_der = 2;
+
+ // not_after_start defines the start of the range of acceptable NotAfter
+ // values, inclusive.
+ // Leaving this unset implies no lower bound to the range.
+ google.protobuf.Timestamp not_after_start = 3;
+ // not_after_limit defines the end of the range of acceptable NotAfter values,
+ // exclusive.
+ // Leaving this unset implies no upper bound to the range.
+ google.protobuf.Timestamp not_after_limit = 4;
+}
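+
+// An illustrative text-protobuf config (hypothetical URIs and times) with two
+// contiguous shards; note the first shard's not_after_limit equals the second
+// shard's not_after_start:
+//
+//   shard {
+//     uri: "https://ct.example.com/2024"
+//     not_after_limit { seconds: 1735689600 }  # 2025-01-01T00:00:00Z
+//   }
+//   shard {
+//     uri: "https://ct.example.com/2025"
+//     not_after_start { seconds: 1735689600 }
+//   }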
diff --git a/vendor/github.com/google/certificate-transparency-go/client/getentries.go b/vendor/github.com/google/certificate-transparency-go/client/getentries.go
new file mode 100644
index 00000000000..103dc815803
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/client/getentries.go
@@ -0,0 +1,68 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "errors"
+ "strconv"
+
+ ct "github.com/google/certificate-transparency-go"
+ "github.com/google/certificate-transparency-go/x509"
+)
+
+// GetRawEntries exposes the /ct/v1/get-entries result with only the JSON parsing done.
+func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.GetEntriesResponse, error) {
+ if end < 0 {
+ return nil, errors.New("end should be >= 0")
+ }
+ if end < start {
+ return nil, errors.New("start should be <= end")
+ }
+
+ params := map[string]string{
+ "start": strconv.FormatInt(start, 10),
+ "end": strconv.FormatInt(end, 10),
+ }
+
+ var resp ct.GetEntriesResponse
+ if _, _, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp); err != nil {
+ return nil, err
+ }
+
+ return &resp, nil
+}
+
+// GetEntries attempts to retrieve the entries in the sequence [start, end] from the CT log server
+// (RFC6962 s4.6) as parsed [pre-]certificates for convenience, held in a slice of ct.LogEntry structures.
+// However, this does mean that any fatal certificate parsing failure will cause a failure of the whole
+// retrieval operation; for more robust retrieval of parsed certificates, use GetRawEntries() and invoke
+// ct.LogEntryFromLeaf() on each individual entry.
+func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogEntry, error) {
+ resp, err := c.GetRawEntries(ctx, start, end)
+ if err != nil {
+ return nil, err
+ }
+ entries := make([]ct.LogEntry, len(resp.Entries))
+ for i, entry := range resp.Entries {
+ index := start + int64(i)
+ logEntry, err := ct.LogEntryFromLeaf(index, &entry)
+ if x509.IsFatal(err) {
+ return nil, err
+ }
+ entries[i] = *logEntry
+ }
+ return entries, nil
+}
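+
+// A sketch of the more robust pattern recommended in the GetEntries comment
+// (process is a hypothetical callback; only fatal x509 errors abort, so one
+// unparsable certificate does not fail the whole batch):
+//
+//	resp, err := c.GetRawEntries(ctx, start, end)
+//	if err != nil {
+//		return err
+//	}
+//	for i, leaf := range resp.Entries {
+//		entry, err := ct.LogEntryFromLeaf(start+int64(i), &leaf)
+//		if x509.IsFatal(err) {
+//			return err
+//		}
+//		process(entry)
+//	}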
diff --git a/vendor/github.com/google/certificate-transparency-go/client/logclient.go b/vendor/github.com/google/certificate-transparency-go/client/logclient.go
new file mode 100644
index 00000000000..0e90c1077f4
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/client/logclient.go
@@ -0,0 +1,227 @@
+// Copyright 2014 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package client is a CT log client implementation and contains types and code
+// for interacting with RFC6962-compliant CT Log instances.
+// See http://tools.ietf.org/html/rfc6962 for details.
+package client
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "strconv"
+
+ ct "github.com/google/certificate-transparency-go"
+ "github.com/google/certificate-transparency-go/jsonclient"
+ "github.com/google/certificate-transparency-go/tls"
+)
+
+// LogClient represents a client for a given CT Log instance
+type LogClient struct {
+ jsonclient.JSONClient
+}
+
+// CheckLogClient is an interface that allows (just) checking of various log contents.
+type CheckLogClient interface {
+ BaseURI() string
+ GetSTH(context.Context) (*ct.SignedTreeHead, error)
+ GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error)
+ GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error)
+}
+
+// New constructs a new LogClient instance.
+// |uri| is the base URI of the CT log instance to interact with, e.g.
+// https://ct.googleapis.com/pilot
+// |hc| is the underlying client to be used for HTTP requests to the CT log.
+// |opts| can be used to provide a custom logger interface and a public key
+// for signature verification.
+func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) {
+ logClient, err := jsonclient.New(uri, hc, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &LogClient{*logClient}, err
+}
+
+// RspError represents a server error including HTTP information.
+type RspError = jsonclient.RspError
+
+// Attempts to add |chain| to the log, using the api end-point specified by
+// |path|. If the provided context expires before submission is complete, an
+// error will be returned.
+func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ var resp ct.AddChainResponse
+ var req ct.AddChainRequest
+ for _, link := range chain {
+ req.Chain = append(req.Chain, link.Data)
+ }
+
+ httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var ds ct.DigitallySigned
+ if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil {
+ return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+ } else if len(rest) > 0 {
+ return nil, RspError{
+ Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
+ StatusCode: httpRsp.StatusCode,
+ Body: body,
+ }
+ }
+
+ exts, err := base64.StdEncoding.DecodeString(resp.Extensions)
+ if err != nil {
+ return nil, RspError{
+ Err: fmt.Errorf("invalid base64 data in Extensions (%q): %v", resp.Extensions, err),
+ StatusCode: httpRsp.StatusCode,
+ Body: body,
+ }
+ }
+
+ var logID ct.LogID
+ copy(logID.KeyID[:], resp.ID)
+ sct := &ct.SignedCertificateTimestamp{
+ SCTVersion: resp.SCTVersion,
+ LogID: logID,
+ Timestamp: resp.Timestamp,
+ Extensions: ct.CTExtensions(exts),
+ Signature: ds,
+ }
+ if err := c.VerifySCTSignature(*sct, ctype, chain); err != nil {
+ return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+ }
+ return sct, nil
+}
+
+// AddChain adds the (DER represented) X509 |chain| to the log.
+func (c *LogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ return c.addChainWithRetry(ctx, ct.X509LogEntryType, ct.AddChainPath, chain)
+}
+
+// AddPreChain adds the (DER represented) Precertificate |chain| to the log.
+func (c *LogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ return c.addChainWithRetry(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain)
+}
+
+// GetSTH retrieves the current STH from the log.
+// Returns a populated SignedTreeHead, or a non-nil error (which may be of type
+// RspError if a raw http.Response is available).
+func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) {
+ var resp ct.GetSTHResponse
+ httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp)
+ if err != nil {
+ return nil, err
+ }
+
+ sth, err := resp.ToSignedTreeHead()
+ if err != nil {
+ return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+ }
+
+ if err := c.VerifySTHSignature(*sth); err != nil {
+ return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+ }
+ return sth, nil
+}
+
+// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is
+// successful.
+func (c *LogClient) VerifySTHSignature(sth ct.SignedTreeHead) error {
+ if c.Verifier == nil {
+ // Can't verify signatures without a verifier
+ return nil
+ }
+ return c.Verifier.VerifySTHSignature(sth)
+}
+
+// VerifySCTSignature checks the signature in sct for the given LogEntryType, with associated certificate chain.
+func (c *LogClient) VerifySCTSignature(sct ct.SignedCertificateTimestamp, ctype ct.LogEntryType, certData []ct.ASN1Cert) error {
+ if c.Verifier == nil {
+ // Can't verify signatures without a verifier
+ return nil
+ }
+ leaf, err := ct.MerkleTreeLeafFromRawChain(certData, ctype, sct.Timestamp)
+ if err != nil {
+ return fmt.Errorf("failed to build MerkleTreeLeaf: %v", err)
+ }
+ leaf.TimestampedEntry.Extensions = sct.Extensions
+ entry := ct.LogEntry{Leaf: *leaf}
+ return c.Verifier.VerifySCTSignature(sct, entry)
+}
+
+// GetSTHConsistency retrieves the consistency proof between two snapshots.
+func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) {
+ base10 := 10
+ params := map[string]string{
+ "first": strconv.FormatUint(first, base10),
+ "second": strconv.FormatUint(second, base10),
+ }
+ var resp ct.GetSTHConsistencyResponse
+ if _, _, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp); err != nil {
+ return nil, err
+ }
+ return resp.Consistency, nil
+}
+
+// GetProofByHash returns an audit path for the hash of an SCT.
+func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) {
+ b64Hash := base64.StdEncoding.EncodeToString(hash)
+ base10 := 10
+ params := map[string]string{
+ "tree_size": strconv.FormatUint(treeSize, base10),
+ "hash": b64Hash,
+ }
+ var resp ct.GetProofByHashResponse
+ if _, _, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// GetAcceptedRoots retrieves the set of acceptable root certificates for a log.
+func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) {
+ var resp ct.GetRootsResponse
+ httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp)
+ if err != nil {
+ return nil, err
+ }
+ var roots []ct.ASN1Cert
+ for _, cert64 := range resp.Certificates {
+ cert, err := base64.StdEncoding.DecodeString(cert64)
+ if err != nil {
+ return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+ }
+ roots = append(roots, ct.ASN1Cert{Data: cert})
+ }
+ return roots, nil
+}
+
+// GetEntryAndProof returns a log entry and audit path for the index of a leaf.
+func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) {
+ base10 := 10
+ params := map[string]string{
+ "leaf_index": strconv.FormatUint(index, base10),
+ "tree_size": strconv.FormatUint(treeSize, base10),
+ }
+ var resp ct.GetEntryAndProofResponse
+ if _, _, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
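+
+// Minimal usage sketch (hypothetical URI; pemKey is a PEM-encoded public key,
+// which enables STH/SCT signature verification):
+//
+//	client, err := New("https://ct.example.com/log", http.DefaultClient,
+//		jsonclient.Options{PublicKey: pemKey})
+//	if err != nil {
+//		return err
+//	}
+//	sth, err := client.GetSTH(ctx)
+//	// sth.TreeSize and sth.SHA256RootHash describe the current tree head.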
diff --git a/vendor/github.com/google/certificate-transparency-go/client/multilog.go b/vendor/github.com/google/certificate-transparency-go/client/multilog.go
new file mode 100644
index 00000000000..afd75a6db4a
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/client/multilog.go
@@ -0,0 +1,223 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "time"
+
+ ct "github.com/google/certificate-transparency-go"
+ "github.com/google/certificate-transparency-go/client/configpb"
+ "github.com/google/certificate-transparency-go/jsonclient"
+ "github.com/google/certificate-transparency-go/x509"
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/proto"
+)
+
+type interval struct {
+ lower *time.Time // nil => no lower bound
+ upper *time.Time // nil => no upper bound
+}
+
+// TemporalLogConfigFromFile creates a TemporalLogConfig object from the given
+// filename, which should contain text-protobuf encoded configuration data.
+func TemporalLogConfigFromFile(filename string) (*configpb.TemporalLogConfig, error) {
+ if len(filename) == 0 {
+ return nil, errors.New("log config filename empty")
+ }
+
+ cfgBytes, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read log config: %v", err)
+ }
+
+ var cfg configpb.TemporalLogConfig
+ if txtErr := prototext.Unmarshal(cfgBytes, &cfg); txtErr != nil {
+ if binErr := proto.Unmarshal(cfgBytes, &cfg); binErr != nil {
+ return nil, fmt.Errorf("failed to parse TemporalLogConfig from %q as text protobuf (%v) or binary protobuf (%v)", filename, txtErr, binErr)
+ }
+ }
+
+ if len(cfg.Shard) == 0 {
+ return nil, errors.New("empty log config found")
+ }
+ return &cfg, nil
+}
+
+// AddLogClient is an interface that allows adding certificates and pre-certificates to a log.
+// Both LogClient and TemporalLogClient implement this interface, which lets
+// users share code for adding certs to normal/temporal logs.
+type AddLogClient interface {
+ AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error)
+ AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error)
+ GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error)
+}
+
+// TemporalLogClient allows [pre-]certificates to be uploaded to a temporal log.
+type TemporalLogClient struct {
+ Clients []*LogClient
+ intervals []interval
+}
+
+// NewTemporalLogClient builds a new client for interacting with a temporal log.
+// The provided config should be contiguous and chronological.
+func NewTemporalLogClient(cfg *configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) {
+ if len(cfg.GetShard()) == 0 {
+ return nil, errors.New("empty config")
+ }
+
+ overall, err := shardInterval(cfg.Shard[0])
+ if err != nil {
+ return nil, fmt.Errorf("cfg.Shard[0] invalid: %v", err)
+ }
+ intervals := make([]interval, 0, len(cfg.Shard))
+ intervals = append(intervals, overall)
+ for i := 1; i < len(cfg.Shard); i++ {
+ interval, err := shardInterval(cfg.Shard[i])
+ if err != nil {
+ return nil, fmt.Errorf("cfg.Shard[%d] invalid: %v", i, err)
+ }
+ if overall.upper == nil {
+ return nil, fmt.Errorf("cfg.Shard[%d] extends an interval with no upper bound", i)
+ }
+ if interval.lower == nil {
+ return nil, fmt.Errorf("cfg.Shard[%d] has no lower bound but extends an interval", i)
+ }
+ if !interval.lower.Equal(*overall.upper) {
+ return nil, fmt.Errorf("cfg.Shard[%d] starts at %v but previous interval ended at %v", i, interval.lower, overall.upper)
+ }
+ overall.upper = interval.upper
+ intervals = append(intervals, interval)
+ }
+ clients := make([]*LogClient, 0, len(cfg.Shard))
+ for i, shard := range cfg.Shard {
+ opts := jsonclient.Options{UserAgent: "ct-go-multilog/1.0"}
+ opts.PublicKeyDER = shard.GetPublicKeyDer()
+ c, err := New(shard.Uri, hc, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create client for cfg.Shard[%d]: %v", i, err)
+ }
+ clients = append(clients, c)
+ }
+ tlc := TemporalLogClient{
+ Clients: clients,
+ intervals: intervals,
+ }
+ return &tlc, nil
+}
+
+// GetAcceptedRoots retrieves the set of acceptable root certificates for all
+// of the shards of a temporal log (i.e. the union).
+func (tlc *TemporalLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) {
+ type result struct {
+ roots []ct.ASN1Cert
+ err error
+ }
+ results := make(chan result, len(tlc.Clients))
+ for _, c := range tlc.Clients {
+ go func(c *LogClient) {
+ var r result
+ r.roots, r.err = c.GetAcceptedRoots(ctx)
+ results <- r
+ }(c)
+ }
+
+ var allRoots []ct.ASN1Cert
+ seen := make(map[[sha256.Size]byte]bool)
+ for range tlc.Clients {
+ r := <-results
+ if r.err != nil {
+ return nil, r.err
+ }
+ for _, root := range r.roots {
+ h := sha256.Sum256(root.Data)
+ if seen[h] {
+ continue
+ }
+ seen[h] = true
+ allRoots = append(allRoots, root)
+ }
+ }
+ return allRoots, nil
+}
+
+// AddChain adds the (DER represented) X509 chain to the appropriate log.
+func (tlc *TemporalLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ return tlc.addChain(ctx, ct.X509LogEntryType, ct.AddChainPath, chain)
+}
+
+// AddPreChain adds the (DER represented) Precertificate chain to the appropriate log.
+func (tlc *TemporalLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ return tlc.addChain(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain)
+}
+
+func (tlc *TemporalLogClient) addChain(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+ // Parse the first entry in the chain
+ if len(chain) == 0 {
+ return nil, errors.New("missing chain")
+ }
+ cert, err := x509.ParseCertificate(chain[0].Data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse initial chain entry: %v", err)
+ }
+ cidx, err := tlc.IndexByDate(cert.NotAfter)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find log to process cert: %v", err)
+ }
+ return tlc.Clients[cidx].addChainWithRetry(ctx, ctype, path, chain)
+}
+
+// IndexByDate returns the index of the Clients entry that is appropriate for the given
+// date.
+func (tlc *TemporalLogClient) IndexByDate(when time.Time) (int, error) {
+ for i, interval := range tlc.intervals {
+ if (interval.lower != nil) && when.Before(*interval.lower) {
+ continue
+ }
+ if (interval.upper != nil) && !when.Before(*interval.upper) {
+ continue
+ }
+ return i, nil
+ }
+ return -1, fmt.Errorf("no log found encompassing date %v", when)
+}
+
+func shardInterval(cfg *configpb.LogShardConfig) (interval, error) {
+ var interval interval
+ if cfg.NotAfterStart != nil {
+ if err := cfg.NotAfterStart.CheckValid(); err != nil {
+ return interval, fmt.Errorf("failed to parse NotAfterStart: %v", err)
+ }
+ t := cfg.NotAfterStart.AsTime()
+ interval.lower = &t
+ }
+ if cfg.NotAfterLimit != nil {
+ if err := cfg.NotAfterLimit.CheckValid(); err != nil {
+ return interval, fmt.Errorf("failed to parse NotAfterLimit: %v", err)
+ }
+ t := cfg.NotAfterLimit.AsTime()
+ interval.upper = &t
+ }
+
+ if interval.lower != nil && interval.upper != nil && !(*interval.lower).Before(*interval.upper) {
+ return interval, errors.New("inverted interval")
+ }
+ return interval, nil
+}
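+
+// Putting the pieces together (hypothetical file path and chain): submissions
+// are routed to the shard whose interval covers the leaf certificate's
+// NotAfter date.
+//
+//	cfg, err := TemporalLogConfigFromFile("multilog.cfg")
+//	if err != nil {
+//		return err
+//	}
+//	tlc, err := NewTemporalLogClient(cfg, http.DefaultClient)
+//	if err != nil {
+//		return err
+//	}
+//	sct, err := tlc.AddChain(ctx, chain) // shard chosen from parsed chain[0] NotAfter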
diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml
new file mode 100644
index 00000000000..50ac5ef7b13
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml
@@ -0,0 +1,214 @@
+#############################################################################
+## The top section of this file is identical in the 3 cloudbuild.*yaml files.
+## Make sure any edits you make here are copied over to the other files too
+## if appropriate.
+##
+## TODO(al): consider if it's possible to merge these 3 files and control via
+## substitutions.
+#############################################################################
+
+timeout: 1200s
+options:
+ machineType: N1_HIGHCPU_32
+ volumes:
+ - name: go-modules
+ path: /go
+ env:
+ - GOPROXY=https://proxy.golang.org
+ - PROJECT_ROOT=github.com/google/certificate-transparency-go
+ - GOPATH=/go
+
+substitutions:
+ _CLUSTER_NAME: trillian-opensource-ci
+ _MASTER_ZONE: us-central1-a
+
+# Cloud Build logs sent to GCS bucket
+logsBucket: 'gs://trillian-cloudbuild-logs'
+
+steps:
+# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps:
+- name: 'gcr.io/cloud-builders/docker'
+ entrypoint: 'bash'
+ args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0']
+- name: 'gcr.io/cloud-builders/docker'
+ args: [
+ 'build',
+ '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '-f', './integration/Dockerfile',
+ '.'
+ ]
+
+# prepare spins up an ephemeral trillian instance for testing use.
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ entrypoint: 'bash'
+ id: 'prepare'
+ args:
+ - '-exc'
+ - |
+ # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders.
+ docker pull gcr.io/$PROJECT_ID/log_server:latest
+ docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server
+ docker pull gcr.io/$PROJECT_ID/log_signer:latest
+ docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer
+
+ # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo:
+ export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)"
+
+ # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it:
+ echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml
+
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer
+
+# Install proto related bits and block on Trillian being ready
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci-ready'
+ entrypoint: 'bash'
+ args:
+ - '-ec'
+ - |
+ go install \
+ github.com/golang/protobuf/proto \
+ github.com/golang/protobuf/protoc-gen-go \
+ github.com/golang/mock/mockgen \
+ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \
+ github.com/fullstorydev/grpcurl/cmd/grpcurl
+
+ # Generate all protoc and mockgen files
+ go generate -run="protoc" ./...
+ go generate -run="mockgen" ./...
+
+ # Cache all the modules we'll need too
+ go mod download
+ go test ./...
+
+ # Wait for trillian logserver to be up
+ until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done
+
+ # Reset the CT test database
+ export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)"
+ export MYSQL_HOST="mysql"
+ export MYSQL_ROOT_PASSWORD="zaphod"
+ export MYSQL_USER_HOST="%"
+ yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose
+ waitFor: ['prepare']
+
+# Run the presubmit tests
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'default_test'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'race_detection'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_coverage'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_race'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'with_pkcs11_and_race'
+ env:
+ - 'GOFLAGS=-race --tags=pkcs11'
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'WITH_PKCS11=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+# Collect and submit codecoverage reports
+- name: 'gcr.io/cloud-builders/curl'
+ id: 'codecov.io'
+ entrypoint: bash
+ args: ['-c', 'bash <(curl -s https://codecov.io/bash)']
+ env:
+ - 'VCS_COMMIT_ID=$COMMIT_SHA'
+ - 'VCS_BRANCH_NAME=$BRANCH_NAME'
+ - 'VCS_PULL_REQUEST=$_PR_NUMBER'
+ - 'CI_BUILD_ID=$BUILD_ID'
+ - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger
+ waitFor: ['etcd_with_coverage']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci_complete'
+ entrypoint: /bin/true
+ waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race']
+
+############################################################################
+## End of replicated section.
+## Below are deployment specific steps for the CD env.
+############################################################################
+
+- id: build_ctfe
+ name: gcr.io/cloud-builders/docker
+ args:
+ - build
+ - --file=trillian/examples/deployment/docker/ctfe/Dockerfile
+ - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+ - --cache-from=gcr.io/${PROJECT_ID}/ctfe
+ - .
+ waitFor: [-]
+- id: build_envsubst
+ name: gcr.io/cloud-builders/docker
+ args:
+ - build
+ - trillian/examples/deployment/docker/envsubst
+ - -t
+ - envsubst
+ waitFor: ['ci_complete']
+- id: envsubst_kubernetes_configs
+ name: envsubst
+ args:
+ - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
+ - trillian/examples/deployment/kubernetes/ctfe-service.yaml
+ - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
+ env:
+ - PROJECT_ID=${PROJECT_ID}
+ - IMAGE_TAG=${COMMIT_SHA}
+ waitFor:
+ - build_envsubst
+- id: update_kubernetes_configs_dryrun
+ name: gcr.io/cloud-builders/kubectl
+ args:
+ - apply
+ - --dry-run=server
+ - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
+ - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml
+ - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
+ env:
+ - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
+ - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
+ waitFor:
+ - envsubst_kubernetes_configs
+ - build_ctfe
+
+images:
+- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+- gcr.io/${PROJECT_ID}/ct_testbase:latest
diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml
new file mode 100644
index 00000000000..566edfe0f97
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml
@@ -0,0 +1,230 @@
+#############################################################################
+## The top section of this file is identical in the 3 cloudbuild.*yaml files.
+## Make sure any edits you make here are copied over to the other files too
+## if appropriate.
+##
+## TODO(al): consider if it's possible to merge these 3 files and control via
+## substitutions.
+#############################################################################
+
+timeout: 1200s
+options:
+ machineType: N1_HIGHCPU_32
+ volumes:
+ - name: go-modules
+ path: /go
+ env:
+ - GOPROXY=https://proxy.golang.org
+ - PROJECT_ROOT=github.com/google/certificate-transparency-go
+ - GOPATH=/go
+
+substitutions:
+ _CLUSTER_NAME: trillian-opensource-ci
+ _MASTER_ZONE: us-central1-a
+
+# Cloud Build logs sent to GCS bucket
+logsBucket: 'gs://trillian-cloudbuild-logs'
+
+steps:
+# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps:
+- name: 'gcr.io/cloud-builders/docker'
+ entrypoint: 'bash'
+ args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0']
+- name: 'gcr.io/cloud-builders/docker'
+ args: [
+ 'build',
+ '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '-f', './integration/Dockerfile',
+ '.'
+ ]
+
+# prepare spins up an ephemeral trillian instance for testing use.
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ entrypoint: 'bash'
+ id: 'prepare'
+ args:
+ - '-exc'
+ - |
+ # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders.
+ docker pull gcr.io/$PROJECT_ID/log_server:latest
+ docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server
+ docker pull gcr.io/$PROJECT_ID/log_signer:latest
+ docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer
+
+ # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo:
+ export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)"
+
+ # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it:
+ echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml
+
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer
+
+# Install proto related bits and block on Trillian being ready
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci-ready'
+ entrypoint: 'bash'
+ args:
+ - '-ec'
+ - |
+ go install \
+ github.com/golang/protobuf/proto \
+ github.com/golang/protobuf/protoc-gen-go \
+ github.com/golang/mock/mockgen \
+ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \
+ github.com/fullstorydev/grpcurl/cmd/grpcurl
+
+ # Generate all protoc and mockgen files
+ go generate -run="protoc" ./...
+ go generate -run="mockgen" ./...
+
+ # Cache all the modules we'll need too
+ go mod download
+ go test ./...
+
+ # Wait for trillian logserver to be up
+ until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done
+
+ # Reset the CT test database
+ export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)"
+ export MYSQL_HOST="mysql"
+ export MYSQL_ROOT_PASSWORD="zaphod"
+ export MYSQL_USER_HOST="%"
+ yes | bash "$${CT_GO_PATH}/scripts/resetctdb.sh" --verbose
+ waitFor: ['prepare']
+
+# Run the presubmit tests
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'default_test'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'race_detection'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_coverage'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_race'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'with_pkcs11_and_race'
+ env:
+ - 'GOFLAGS=-race --tags=pkcs11'
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'WITH_PKCS11=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+# Collect and submit codecoverage reports
+- name: 'gcr.io/cloud-builders/curl'
+ id: 'codecov.io'
+ entrypoint: bash
+ args: ['-c', 'bash <(curl -s https://codecov.io/bash)']
+ env:
+ - 'VCS_COMMIT_ID=$COMMIT_SHA'
+ - 'VCS_BRANCH_NAME=$BRANCH_NAME'
+ - 'VCS_PULL_REQUEST=$_PR_NUMBER'
+ - 'CI_BUILD_ID=$BUILD_ID'
+ - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger
+ waitFor: ['etcd_with_coverage']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci_complete'
+ entrypoint: /bin/true
+ waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race']
+
+############################################################################
+## End of replicated section.
+## Below are deployment specific steps for the CD env.
+############################################################################
+
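+# Note: waitFor: ["-"] makes a step start immediately, without waiting for any
+# earlier steps to complete.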
+- id: build_ctfe
+ name: gcr.io/cloud-builders/docker
+ args:
+ - build
+ - --file=trillian/examples/deployment/docker/ctfe/Dockerfile
+ - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+ - --cache-from=gcr.io/${PROJECT_ID}/ctfe
+ - .
+ waitFor: ["-"]
+- id: push_ctfe
+ name: gcr.io/cloud-builders/docker
+ args:
+ - push
+ - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+ waitFor:
+ - build_ctfe
+- id: tag_latest_ctfe
+ name: gcr.io/cloud-builders/gcloud
+ args:
+ - container
+ - images
+ - add-tag
+ - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+ - gcr.io/${PROJECT_ID}/ctfe:latest
+ waitFor:
+ - push_ctfe
+- id: build_envsubst
+ name: gcr.io/cloud-builders/docker
+ args:
+ - build
+ - trillian/examples/deployment/docker/envsubst
+ - -t
+ - envsubst
+ waitFor: ["-"]
+- id: envsubst_kubernetes_configs
+ name: envsubst
+ args:
+ - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
+ - trillian/examples/deployment/kubernetes/ctfe-service.yaml
+ - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
+ env:
+ - PROJECT_ID=${PROJECT_ID}
+ - IMAGE_TAG=${COMMIT_SHA}
+ waitFor:
+ - build_envsubst
+- id: update_kubernetes_configs
+ name: gcr.io/cloud-builders/kubectl
+ args:
+ - apply
+ - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml
+ - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml
+ - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml
+ env:
+ - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
+ - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
+ waitFor:
+ - envsubst_kubernetes_configs
+ - push_ctfe
+
+images:
+- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA}
+- gcr.io/${PROJECT_ID}/ct_testbase:latest
diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml
new file mode 100644
index 00000000000..37faca72ac0
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_postgresql.yaml
@@ -0,0 +1,161 @@
+#############################################################################
+## This file is based on cloudbuild.yaml, but targets PostgreSQL instead of
+## MySQL.
+#############################################################################
+
+timeout: 1200s
+options:
+ machineType: N1_HIGHCPU_32
+ volumes:
+ - name: go-modules
+ path: /go
+ env:
+ - GOPROXY=https://proxy.golang.org
+ - PROJECT_ROOT=github.com/google/certificate-transparency-go
+ - GOPATH=/go
+
+substitutions:
+ _CLUSTER_NAME: trillian-opensource-ci
+ _MASTER_ZONE: us-central1-a
+
+# Cloud Build logs sent to GCS bucket
+logsBucket: 'gs://trillian-cloudbuild-logs'
+
+steps:
+# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps:
+- name: 'gcr.io/cloud-builders/docker'
+ entrypoint: 'bash'
+ args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0']
+- name: 'gcr.io/cloud-builders/docker'
+ args: [
+ 'build',
+ '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '-f', './integration/Dockerfile',
+ '.'
+ ]
+
+# prepare spins up an ephemeral trillian instance for testing use.
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ entrypoint: 'bash'
+ id: 'prepare'
+ args:
+ - '-exc'
+ - |
+ # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders.
+ docker pull gcr.io/$PROJECT_ID/log_server:latest
+ docker tag gcr.io/$PROJECT_ID/log_server:latest postgresql_trillian-log-server
+ docker pull gcr.io/$PROJECT_ID/log_signer:latest
+ docker tag gcr.io/$PROJECT_ID/log_signer:latest postgresql_trillian-log-signer
+
+ # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo:
+ export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)"
+
+ # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it:
+ echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml
+
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml pull postgresql trillian-log-server trillian-log-signer
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/postgresql/docker-compose.yml up -d postgresql trillian-log-server trillian-log-signer
+
+# Install proto related bits and block on Trillian being ready
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci-ready'
+ entrypoint: 'bash'
+ args:
+ - '-ec'
+ - |
+ go install \
+ github.com/golang/protobuf/proto \
+ github.com/golang/protobuf/protoc-gen-go \
+ github.com/golang/mock/mockgen \
+ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \
+ github.com/fullstorydev/grpcurl/cmd/grpcurl
+
+ # Generate all protoc and mockgen files
+ go generate -run="protoc" ./...
+ go generate -run="mockgen" ./...
+
+ # Cache all the modules we'll need too
+ go mod download
+ go test ./...
+
+ # Wait for trillian logserver to be up
+ until nc -z postgresql_trillian-log-server_1 8090; do echo .; sleep 5; done
+
+ # Reset the CT test database
+ export CT_GO_PATH="$$(go list -f '{{.Dir}}' github.com/google/certificate-transparency-go)"
+ export POSTGRESQL_HOST="postgresql"
+ yes | bash "$${CT_GO_PATH}/scripts/resetpgctdb.sh" --verbose
+ waitFor: ['prepare']
+
+# Run the presubmit tests
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'default_test'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090'
+ - 'CONFIG_SUBDIR=/postgresql'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'race_detection'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090'
+ - 'CONFIG_SUBDIR=/postgresql'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_coverage'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate --coverage'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090'
+ - 'CONFIG_SUBDIR=/postgresql'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_race'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090'
+ - 'CONFIG_SUBDIR=/postgresql'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'with_pkcs11_and_race'
+ env:
+ - 'GOFLAGS=-race --tags=pkcs11'
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'WITH_PKCS11=true'
+ - 'TRILLIAN_LOG_SERVERS=postgresql_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=postgresql_trillian-log-server_1:8090'
+ - 'CONFIG_SUBDIR=/postgresql'
+ waitFor: ['ci-ready']
+
+# Collect and submit codecoverage reports
+- name: 'gcr.io/cloud-builders/curl'
+ id: 'codecov.io'
+ entrypoint: bash
+ args: ['-c', 'bash <(curl -s https://codecov.io/bash)']
+ env:
+ - 'VCS_COMMIT_ID=$COMMIT_SHA'
+ - 'VCS_BRANCH_NAME=$BRANCH_NAME'
+ - 'VCS_PULL_REQUEST=$_PR_NUMBER'
+ - 'CI_BUILD_ID=$BUILD_ID'
+ - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger
+ waitFor: ['etcd_with_coverage']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci_complete'
+ entrypoint: /bin/true
+ waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race']
diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml
new file mode 100644
index 00000000000..ce9efc2b2cb
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml
@@ -0,0 +1,173 @@
+#############################################################################
+## The top section of this file is identical in the 3 cloudbuild.*yaml files.
+## Make sure any edits you make here are copied over to the other files too
+## if appropriate.
+##
+## TODO(al): consider if it's possible to merge these 3 files and control via
+## substitutions.
+#############################################################################
+
+timeout: 1200s
+options:
+ machineType: N1_HIGHCPU_32
+ volumes:
+ - name: go-modules
+ path: /go
+ env:
+ - GOPROXY=https://proxy.golang.org
+ - PROJECT_ROOT=github.com/google/certificate-transparency-go
+ - GOPATH=/go
+
+substitutions:
+ _CLUSTER_NAME: trillian-opensource-ci
+ _MASTER_ZONE: us-central1-a
+
+# Cloud Build logs sent to GCS bucket
+logsBucket: 'gs://trillian-cloudbuild-logs'
+
+steps:
+# First build a "ct_testbase" docker image which contains most of the tools we need for the later steps:
+- name: 'gcr.io/cloud-builders/docker'
+ entrypoint: 'bash'
+ args: ['-c', 'docker pull gcr.io/$PROJECT_ID/ct_testbase:latest || exit 0']
+- name: 'gcr.io/cloud-builders/docker'
+ args: [
+ 'build',
+ '-t', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '--cache-from', 'gcr.io/$PROJECT_ID/ct_testbase:latest',
+ '-f', './integration/Dockerfile',
+ '.'
+ ]
+
+# prepare spins up an ephemeral trillian instance for testing use.
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ entrypoint: 'bash'
+ id: 'prepare'
+ args:
+ - '-exc'
+ - |
+ # Use latest versions of Trillian docker images built by the Trillian CI cloudbuilders.
+ docker pull gcr.io/$PROJECT_ID/log_server:latest
+ docker tag gcr.io/$PROJECT_ID/log_server:latest deployment_trillian-log-server
+ docker pull gcr.io/$PROJECT_ID/log_signer:latest
+ docker tag gcr.io/$PROJECT_ID/log_signer:latest deployment_trillian-log-signer
+
+ # Bring up an ephemeral trillian instance using the docker-compose config in the Trillian repo:
+ export TRILLIAN_LOCATION="$$(go list -f '{{.Dir}}' github.com/google/trillian)"
+
+ # We need to fix up Trillian's docker-compose to connect to the CloudBuild network so that tests can use it:
+ echo -e "networks:\n default:\n external:\n name: cloudbuild" >> $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml
+
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml pull mysql trillian-log-server trillian-log-signer
+ docker-compose -f $${TRILLIAN_LOCATION}/examples/deployment/docker-compose.yml up -d mysql trillian-log-server trillian-log-signer
+
+# Install proto related bits and block on Trillian being ready
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci-ready'
+ entrypoint: 'bash'
+ args:
+ - '-ec'
+ - |
+ go install \
+ github.com/golang/protobuf/proto \
+ github.com/golang/protobuf/protoc-gen-go \
+ github.com/golang/mock/mockgen \
+ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \
+ github.com/fullstorydev/grpcurl/cmd/grpcurl
+
+ # Generate all protoc and mockgen files
+ go generate -run="protoc" ./...
+ go generate -run="mockgen" ./...
+
+ # Cache all the modules we'll need too
+ go mod download
+ go test ./...
+
+ # Wait for trillian logserver to be up
+ until nc -z deployment_trillian-log-server_1 8090; do echo .; sleep 5; done
+ waitFor: ['prepare']
+
+# Run the presubmit tests
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'default_test'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --no-generate'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'race_detection'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_coverage'
+ env:
+ - 'GOFLAGS='
+ - 'PRESUBMIT_OPTS=--no-linters --coverage'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'etcd_with_race'
+ env:
+ - 'GOFLAGS=-race'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'WITH_ETCD=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'with_pkcs11_and_race'
+ env:
+ - 'GOFLAGS=-race --tags=pkcs11'
+ - 'PRESUBMIT_OPTS=--no-linters'
+ - 'WITH_PKCS11=true'
+ - 'TRILLIAN_LOG_SERVERS=deployment_trillian-log-server_1:8090'
+ - 'TRILLIAN_LOG_SERVER_1=deployment_trillian-log-server_1:8090'
+ waitFor: ['ci-ready']
+
+# Collect and submit codecoverage reports
+- name: 'gcr.io/cloud-builders/curl'
+ id: 'codecov.io'
+ entrypoint: bash
+ args: ['-c', 'bash <(curl -s https://codecov.io/bash)']
+ env:
+ - 'VCS_COMMIT_ID=$COMMIT_SHA'
+ - 'VCS_BRANCH_NAME=$BRANCH_NAME'
+ - 'VCS_PULL_REQUEST=$_PR_NUMBER'
+ - 'CI_BUILD_ID=$BUILD_ID'
+ - 'CODECOV_TOKEN=$_CODECOV_TOKEN' # _CODECOV_TOKEN is specified in the cloud build trigger
+ waitFor: ['etcd_with_coverage']
+
+- name: gcr.io/$PROJECT_ID/ct_testbase
+ id: 'ci_complete'
+ entrypoint: /bin/true
+ waitFor: ['codecov.io', 'default_test', 'race_detection', 'etcd_with_coverage', 'etcd_with_race', 'with_pkcs11_and_race']
+
+############################################################################
+## End of replicated section.
+## Below are deployment specific steps for the CD env.
+############################################################################
+
+- id: build_ctfe
+ name: gcr.io/cloud-builders/docker
+ args:
+ - build
+ - --file=trillian/examples/deployment/docker/ctfe/Dockerfile
+ - --tag=gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}
+ - --cache-from=gcr.io/${PROJECT_ID}/ctfe
+ - .
+
+images:
+- gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}
+- gcr.io/${PROJECT_ID}/ct_testbase:latest
diff --git a/vendor/github.com/google/certificate-transparency-go/codecov.yml b/vendor/github.com/google/certificate-transparency-go/codecov.yml
new file mode 100644
index 00000000000..7269ff27150
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/codecov.yml
@@ -0,0 +1,19 @@
+# Customizations to codecov for the c-t-go repo. This will be merged into
+# the team / default codecov yaml file.
+#
+# Validate changes with:
+# curl --data-binary @codecov.yml https://codecov.io/validate
+
+# Exclude code that's for testing, demos or utilities that aren't really
+# part of production releases.
+ignore:
+ - "**/mock_*.go"
+ - "**/testonly"
+ - "trillian/integration"
+
+coverage:
+ status:
+ project:
+ default:
+ # Allow 1% coverage drop without complaining, to avoid being too noisy.
+ threshold: 1%
diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go
new file mode 100644
index 00000000000..640fcec9c1f
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/ctutil/ctutil.go
@@ -0,0 +1,211 @@
+// Copyright 2018 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ctutil contains utilities for Certificate Transparency.
+package ctutil
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+
+ ct "github.com/google/certificate-transparency-go"
+ "github.com/google/certificate-transparency-go/tls"
+ "github.com/google/certificate-transparency-go/x509"
+)
+
+var emptyHash = [sha256.Size]byte{}
+
+// LeafHashB64 works like LeafHash, but returns the leaf hash base64-encoded.
+// The base64-encoded leaf hash returned by LeafHashB64 can be used with the
+// get-proof-by-hash API endpoint of Certificate Transparency Logs.
+func LeafHashB64(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (string, error) {
+ hash, err := LeafHash(chain, sct, embedded)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(hash[:]), nil
+}
+
+// LeafHash calculates the leaf hash of the certificate or precertificate at
+// chain[0] that sct was issued for.
+//
+// sct is required because the SCT timestamp is used to calculate the leaf hash.
+// Leaf hashes are unique to (pre)certificate-SCT pairs.
+//
+// This function can be used with three different types of leaf certificate:
+// - X.509 Certificate:
+// If using this function to calculate the leaf hash for a normal X.509
+// certificate then it is enough to just provide the end entity
+// certificate in chain. This case assumes that the SCT being provided is
+// not embedded within the leaf certificate provided, i.e. the certificate
+// is what was submitted to the Certificate Transparency Log in order to
+// obtain the SCT. For this case, set embedded to false.
+// - Precertificate:
+// If using this function to calculate the leaf hash for a precertificate
+// then the issuing certificate must also be provided in chain. The
+// precertificate should be at chain[0], and its issuer at chain[1]. For
+// this case, set embedded to false.
+// - X.509 Certificate containing the SCT embedded within it:
+// If using this function to calculate the leaf hash for a certificate
+// where the SCT provided is embedded within the certificate you
+// are providing at chain[0], set embedded to true. LeafHash will
+// calculate the leaf hash by building the corresponding precertificate.
+// LeafHash will return an error if the provided SCT cannot be found
+// embedded within chain[0]. As with the precertificate case, the issuing
+// certificate must also be provided in chain. The certificate containing
+// the embedded SCT should be at chain[0], and its issuer at chain[1].
+//
+// Note: LeafHash doesn't check that the provided SCT verifies for the given
+// chain. It simply calculates what the leaf hash would be for the given
+// (pre)certificate-SCT pair.
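+//
+// A minimal usage sketch (chain and sct are assumed to have been obtained
+// elsewhere, e.g. from an add-chain response):
+//
+//	hash, err := ctutil.LeafHash(chain, sct, false)
+//	if err != nil {
+//		// handle error
+//	}
+//	b64 := base64.StdEncoding.EncodeToString(hash[:])
+//	// b64 can be passed to the log's get-proof-by-hash endpoint.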
+func LeafHash(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) ([sha256.Size]byte, error) {
+ leaf, err := createLeaf(chain, sct, embedded)
+ if err != nil {
+ return emptyHash, err
+ }
+ return ct.LeafHashForLeaf(leaf)
+}
+
+// VerifySCT takes the public key of a Certificate Transparency Log, a
+// certificate chain, and an SCT and verifies whether the SCT is a valid SCT for
+// the certificate at chain[0], signed by the Log that the public key belongs
+// to. If the SCT does not verify, an error will be returned.
+//
+// This function can be used with three different types of leaf certificate:
+// - X.509 Certificate:
+// If using this function to verify an SCT for a normal X.509 certificate
+// then it is enough to just provide the end entity certificate in chain.
+// This case assumes that the SCT being provided is not embedded within
+// the leaf certificate provided, i.e. the certificate is what was
+// submitted to the Certificate Transparency Log in order to obtain the
+// SCT. For this case, set embedded to false.
+// - Precertificate:
+// If using this function to verify an SCT for a precertificate then the
+// issuing certificate must also be provided in chain. The precertificate
+// should be at chain[0], and its issuer at chain[1]. For this case, set
+// embedded to false.
+// - X.509 Certificate containing the SCT embedded within it:
+// If the SCT you wish to verify is embedded within the certificate you
+// are providing at chain[0], set embedded to true. VerifySCT will
+// verify the provided SCT by building the corresponding precertificate.
+// VerifySCT will return an error if the provided SCT cannot be found
+// embedded within chain[0]. As with the precertificate case, the issuing
+// certificate must also be provided in chain. The certificate containing
+// the embedded SCT should be at chain[0], and its issuer at chain[1].
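+//
+// For example (a sketch; logPubKey, chain and sct are assumed to come from
+// the caller), to verify an SCT embedded in chain[0]:
+//
+//	if err := ctutil.VerifySCT(logPubKey, chain, sct, true); err != nil {
+//		// SCT failed verification
+//	}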
+func VerifySCT(pubKey crypto.PublicKey, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error {
+ s, err := ct.NewSignatureVerifier(pubKey)
+ if err != nil {
+ return fmt.Errorf("error creating signature verifier: %s", err)
+ }
+
+ return VerifySCTWithVerifier(s, chain, sct, embedded)
+}
+
+// VerifySCTWithVerifier takes a ct.SignatureVerifier, a certificate chain, and
+// an SCT and verifies whether the SCT is a valid SCT for the certificate at
+// chain[0], signed by the Log whose public key was used to set up the
+// ct.SignatureVerifier. If the SCT does not verify, an error will be returned.
+//
+// This function can be used with three different types of leaf certificate:
+// - X.509 Certificate:
+// If using this function to verify an SCT for a normal X.509 certificate
+// then it is enough to just provide the end entity certificate in chain.
+// This case assumes that the SCT being provided is not embedded within
+// the leaf certificate provided, i.e. the certificate is what was
+// submitted to the Certificate Transparency Log in order to obtain the
+// SCT. For this case, set embedded to false.
+// - Precertificate:
+// If using this function to verify an SCT for a precertificate then the
+// issuing certificate must also be provided in chain. The precertificate
+// should be at chain[0], and its issuer at chain[1]. For this case, set
+// embedded to false.
+// - X.509 Certificate containing the SCT embedded within it:
+// If the SCT you wish to verify is embedded within the certificate you
+// are providing at chain[0], set embedded to true. VerifySCT will
+// verify the provided SCT by building the corresponding precertificate.
+// VerifySCT will return an error if the provided SCT cannot be found
+// embedded within chain[0]. As with the precertificate case, the issuing
+// certificate must also be provided in chain. The certificate containing
+// the embedded SCT should be at chain[0], and its issuer at chain[1].
+func VerifySCTWithVerifier(sv *ct.SignatureVerifier, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error {
+ if sv == nil {
+ return errors.New("ct.SignatureVerifier is nil")
+ }
+
+ leaf, err := createLeaf(chain, sct, embedded)
+ if err != nil {
+ return err
+ }
+
+ return sv.VerifySCTSignature(*sct, ct.LogEntry{Leaf: *leaf})
+}
+
+func createLeaf(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) (*ct.MerkleTreeLeaf, error) {
+ if len(chain) == 0 {
+ return nil, errors.New("chain is empty")
+ }
+ if sct == nil {
+ return nil, errors.New("sct is nil")
+ }
+
+ if embedded {
+ sctPresent, err := ContainsSCT(chain[0], sct)
+ if err != nil {
+ return nil, fmt.Errorf("error checking for SCT in leaf certificate: %s", err)
+ }
+ if !sctPresent {
+ return nil, errors.New("SCT provided is not embedded within leaf certificate")
+ }
+ }
+
+ certType := ct.X509LogEntryType
+ if chain[0].IsPrecertificate() || embedded {
+ certType = ct.PrecertLogEntryType
+ }
+
+ var leaf *ct.MerkleTreeLeaf
+ var err error
+ if embedded {
+ leaf, err = ct.MerkleTreeLeafForEmbeddedSCT(chain, sct.Timestamp)
+ } else {
+ leaf, err = ct.MerkleTreeLeafFromChain(chain, certType, sct.Timestamp)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error creating MerkleTreeLeaf: %s", err)
+ }
+ return leaf, nil
+}
+
+// ContainsSCT checks to see whether the given SCT is embedded within the given
+// certificate.
+func ContainsSCT(cert *x509.Certificate, sct *ct.SignedCertificateTimestamp) (bool, error) {
+ if cert == nil || sct == nil {
+ return false, nil
+ }
+
+ sctBytes, err := tls.Marshal(*sct)
+ if err != nil {
+ return false, fmt.Errorf("error tls.Marshalling SCT: %s", err)
+ }
+ for _, s := range cert.SCTList.SCTList {
+ if bytes.Equal(sctBytes, s.Val) {
+ return true, nil
+ }
+ }
+ return false, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go
new file mode 100644
index 00000000000..83d9739c7e5
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/ctutil/loginfo.go
@@ -0,0 +1,174 @@
+// Copyright 2018 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ctutil
+
+import (
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ ct "github.com/google/certificate-transparency-go"
+ "github.com/google/certificate-transparency-go/client"
+ "github.com/google/certificate-transparency-go/jsonclient"
+ "github.com/google/certificate-transparency-go/loglist3"
+ "github.com/google/certificate-transparency-go/x509"
+ "github.com/transparency-dev/merkle/proof"
+ "github.com/transparency-dev/merkle/rfc6962"
+)
+
+// LogInfo holds the objects needed to perform per-log verification and
+// validation of SCTs.
+type LogInfo struct {
+ Description string
+ Client client.CheckLogClient
+ MMD time.Duration
+ Verifier *ct.SignatureVerifier
+ PublicKey []byte
+
+ mu sync.RWMutex
+ lastSTH *ct.SignedTreeHead
+}
+
+// NewLogInfo builds a LogInfo object based on a log list entry.
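+//
+// A typical construction (sketch; log is assumed to be a *loglist3.Log taken
+// from a parsed log list):
+//
+//	li, err := ctutil.NewLogInfo(log, http.DefaultClient)
+//	if err != nil {
+//		// handle error
+//	}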
+func NewLogInfo(log *loglist3.Log, hc *http.Client) (*LogInfo, error) {
+ url := log.URL
+ if !strings.HasPrefix(url, "https://") {
+ url = "https://" + url
+ }
+ lc, err := client.New(url, hc, jsonclient.Options{PublicKeyDER: log.Key, UserAgent: "ct-go-logclient"})
+ if err != nil {
+ return nil, fmt.Errorf("failed to create client for log %q: %v", log.Description, err)
+ }
+ return newLogInfo(log, lc)
+}
+
+func newLogInfo(log *loglist3.Log, lc client.CheckLogClient) (*LogInfo, error) {
+ logKey, err := x509.ParsePKIXPublicKey(log.Key)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse public key data for log %q: %v", log.Description, err)
+ }
+ verifier, err := ct.NewSignatureVerifier(logKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to build verifier log %q: %v", log.Description, err)
+ }
+ mmd := time.Duration(log.MMD) * time.Second
+ return &LogInfo{
+ Description: log.Description,
+ Client: lc,
+ MMD: mmd,
+ Verifier: verifier,
+ PublicKey: log.Key,
+ }, nil
+}
+
+// LogInfoByHash holds LogInfo objects indexed by the SHA-256 hash of the log's public key.
+type LogInfoByHash map[[sha256.Size]byte]*LogInfo
+
+// LogInfoByKeyHash builds a map of LogInfo objects indexed by their key hashes.
+func LogInfoByKeyHash(ll *loglist3.LogList, hc *http.Client) (LogInfoByHash, error) {
+ return logInfoByKeyHash(ll, hc, NewLogInfo)
+}
+
+func logInfoByKeyHash(ll *loglist3.LogList, hc *http.Client, infoFactory func(*loglist3.Log, *http.Client) (*LogInfo, error)) (map[[sha256.Size]byte]*LogInfo, error) {
+ result := make(map[[sha256.Size]byte]*LogInfo)
+ for _, operator := range ll.Operators {
+ for _, log := range operator.Logs {
+ h := sha256.Sum256(log.Key)
+ li, err := infoFactory(log, hc)
+ if err != nil {
+ return nil, err
+ }
+ result[h] = li
+ }
+ }
+ return result, nil
+}
+
+// LastSTH returns the last STH known for the log.
+func (li *LogInfo) LastSTH() *ct.SignedTreeHead {
+ li.mu.RLock()
+ defer li.mu.RUnlock()
+ return li.lastSTH
+}
+
+// SetSTH sets the last STH known for the log.
+func (li *LogInfo) SetSTH(sth *ct.SignedTreeHead) {
+ li.mu.Lock()
+ defer li.mu.Unlock()
+ li.lastSTH = sth
+}
+
+// VerifySCTSignature checks that the signature in the SCT matches the given leaf (adjusted for the
+// timestamp in the SCT) and log.
+func (li *LogInfo) VerifySCTSignature(sct ct.SignedCertificateTimestamp, leaf ct.MerkleTreeLeaf) error {
+ leaf.TimestampedEntry.Timestamp = sct.Timestamp
+ if err := li.Verifier.VerifySCTSignature(sct, ct.LogEntry{Leaf: leaf}); err != nil {
+ return fmt.Errorf("failed to verify SCT signature from log %q: %v", li.Description, err)
+ }
+ return nil
+}
+
+// VerifyInclusionLatest checks that the given Merkle tree leaf, adjusted for the provided timestamp,
+// is present in the latest known tree size of the log. If no tree size for the log is known, it will
+// be queried. On success, returns the index of the leaf in the log.
+func (li *LogInfo) VerifyInclusionLatest(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) {
+ sth := li.LastSTH()
+ if sth == nil {
+ var err error
+ sth, err = li.Client.GetSTH(ctx)
+ if err != nil {
+ return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err)
+ }
+ li.SetSTH(sth)
+ }
+ return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:])
+}
+
+// VerifyInclusion checks that the given Merkle tree leaf, adjusted for the provided timestamp,
+// is present in the current tree size of the log. On success, returns the index of the leaf
+// in the log.
+func (li *LogInfo) VerifyInclusion(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp uint64) (int64, error) {
+ sth, err := li.Client.GetSTH(ctx)
+ if err != nil {
+ return -1, fmt.Errorf("failed to get current STH for %q log: %v", li.Description, err)
+ }
+ li.SetSTH(sth)
+ return li.VerifyInclusionAt(ctx, leaf, timestamp, sth.TreeSize, sth.SHA256RootHash[:])
+}
+
+// VerifyInclusionAt checks that the given Merkle tree leaf, adjusted for the provided timestamp,
+// is present in the given tree size & root hash of the log. On success, returns the index of the
+// leaf in the log.
+func (li *LogInfo) VerifyInclusionAt(ctx context.Context, leaf ct.MerkleTreeLeaf, timestamp, treeSize uint64, rootHash []byte) (int64, error) {
+ leaf.TimestampedEntry.Timestamp = timestamp
+ leafHash, err := ct.LeafHashForLeaf(&leaf)
+ if err != nil {
+ return -1, fmt.Errorf("failed to create leaf hash: %v", err)
+ }
+
+ rsp, err := li.Client.GetProofByHash(ctx, leafHash[:], treeSize)
+ if err != nil {
+ return -1, fmt.Errorf("failed to GetProofByHash(sct,size=%d): %v", treeSize, err)
+ }
+
+ if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(rsp.LeafIndex), treeSize, leafHash[:], rsp.AuditPath, rootHash); err != nil {
+ return -1, fmt.Errorf("failed to verify inclusion proof at size %d: %v", treeSize, err)
+ }
+ return rsp.LeafIndex, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/gossip/minimal/x509ext/x509ext.go b/vendor/github.com/google/certificate-transparency-go/gossip/minimal/x509ext/x509ext.go
new file mode 100644
index 00000000000..540e717d74e
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/gossip/minimal/x509ext/x509ext.go
@@ -0,0 +1,92 @@
+// Copyright 2018 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package x509ext holds extensions types and values for minimal gossip.
+package x509ext
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/google/certificate-transparency-go/asn1"
+ "github.com/google/certificate-transparency-go/tls"
+ "github.com/google/certificate-transparency-go/x509"
+
+ ct "github.com/google/certificate-transparency-go"
+)
+
+// OIDExtensionCTSTH is the OID value for an X.509 extension that holds
+// a log STH value.
+// TODO(drysdale): get an official OID value
+var OIDExtensionCTSTH = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
+
+// OIDExtKeyUsageCTMinimalGossip is the OID value for an extended key usage
+// (EKU) that indicates a leaf certificate is used for the validation of STH
+// values from public CT logs.
+// TODO(drysdale): get an official OID value
+var OIDExtKeyUsageCTMinimalGossip = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 6}
+
+// LogSTHInfo is the structure that gets TLS-encoded into the X.509 extension
+// identified by OIDExtensionCTSTH.
+type LogSTHInfo struct {
+ LogURL []byte `tls:"maxlen:255"`
+ Version tls.Enum `tls:"maxval:255"`
+ TreeSize uint64
+ Timestamp uint64
+ SHA256RootHash ct.SHA256Hash
+ TreeHeadSignature ct.DigitallySigned
+}
+
+// LogSTHInfoFromCert retrieves the STH information embedded in a certificate.
+func LogSTHInfoFromCert(cert *x509.Certificate) (*LogSTHInfo, error) {
+ for _, ext := range cert.Extensions {
+ if ext.Id.Equal(OIDExtensionCTSTH) {
+ var sthInfo LogSTHInfo
+ rest, err := tls.Unmarshal(ext.Value, &sthInfo)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal STH: %v", err)
+ } else if len(rest) > 0 {
+ return nil, fmt.Errorf("trailing data (%d bytes) after STH", len(rest))
+ }
+ return &sthInfo, nil
+ }
+ }
+ return nil, errors.New("no STH extension found")
+}
+
+// HasSTHInfo indicates whether a certificate has embedded STH information.
+func HasSTHInfo(cert *x509.Certificate) bool {
+ for _, ext := range cert.Extensions {
+ if ext.Id.Equal(OIDExtensionCTSTH) {
+ return true
+ }
+ }
+ return false
+}
+
+// STHFromCert retrieves the STH embedded in a certificate; note the returned STH
+// does not have the LogID field filled in.
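+//
+// A usage sketch (cert is assumed to carry the OIDExtensionCTSTH extension):
+//
+//	sth, err := x509ext.STHFromCert(cert)
+//	if err != nil {
+//		// no STH extension present, or malformed
+//	}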
+func STHFromCert(cert *x509.Certificate) (*ct.SignedTreeHead, error) {
+ sthInfo, err := LogSTHInfoFromCert(cert)
+ if err != nil {
+ return nil, err
+ }
+ return &ct.SignedTreeHead{
+ Version: ct.Version(sthInfo.Version),
+ TreeSize: sthInfo.TreeSize,
+ Timestamp: sthInfo.Timestamp,
+ SHA256RootHash: sthInfo.SHA256RootHash,
+ TreeHeadSignature: sthInfo.TreeHeadSignature,
+ }, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go
new file mode 100644
index 00000000000..30932f30d1a
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go
@@ -0,0 +1,72 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jsonclient
+
+import (
+ "sync"
+ "time"
+)
+
+type backoff struct {
+ mu sync.RWMutex
+ multiplier uint
+ notBefore time.Time
+}
+
+const (
+ // maximum backoff is 2^(maxMultiplier-1) = 128 seconds
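+ // (successive failures wait 1s, 2s, 4s, ..., capped at 128s)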
+ maxMultiplier = 8
+)
+
+func (b *backoff) set(override *time.Duration) time.Duration {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.notBefore.After(time.Now()) {
+ if override != nil {
+ // If a backoff is already in force but the override would extend
+ // beyond it, extend the backoff to match the override.
+ notBefore := time.Now().Add(*override)
+ if notBefore.After(b.notBefore) {
+ b.notBefore = notBefore
+ }
+ }
+ return time.Until(b.notBefore)
+ }
+ var wait time.Duration
+ if override != nil {
+ wait = *override
+ } else {
+ if b.multiplier < maxMultiplier {
+ b.multiplier++
+ }
+ wait = time.Second * time.Duration(1<<(b.multiplier-1))
+ }
+ b.notBefore = time.Now().Add(wait)
+ return wait
+}
+
+func (b *backoff) decreaseMultiplier() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.multiplier > 0 {
+ b.multiplier--
+ }
+}
+
+func (b *backoff) until() time.Time {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ return b.notBefore
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
new file mode 100644
index 00000000000..edb8f919afe
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
@@ -0,0 +1,336 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package jsonclient provides a simple client for fetching and parsing
+// JSON CT structures from a log.
+package jsonclient
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ ct "github.com/google/certificate-transparency-go"
+ "github.com/google/certificate-transparency-go/x509"
+)
+
+const maxJitter = 250 * time.Millisecond
+
+type backoffer interface {
+ // set adjusts/increases the current backoff interval (typically on retryable failure);
+ // if the optional parameter is provided, this will be used as the interval if it is greater
+ // than the currently set interval. Returns the current wait period so that it can be
+ // logged along with any error message.
+ set(*time.Duration) time.Duration
+ // decreaseMultiplier reduces the current backoff multiplier, typically on success.
+ decreaseMultiplier()
+ // until returns the time until which the client should wait before making a request,
+ // it may be in the past in which case it should be ignored.
+ until() time.Time
+}
+
+// JSONClient provides common functionality for interacting with a JSON server
+// that uses cryptographic signatures.
+type JSONClient struct {
+ uri string // the base URI of the server. e.g. https://ct.googleapis.com/pilot
+ httpClient *http.Client // used to interact with the server via HTTP
+ Verifier *ct.SignatureVerifier // nil for no verification (e.g. no public key available)
+ logger Logger // interface to use for logging warnings and errors
+ backoff backoffer // object used to store and calculate backoff information
+ userAgent string // If set, this is sent as the UserAgent header.
+ authorization string // If set, this is sent as the Authorization header.
+}
+
+// Logger is a simple logging interface used to log internal errors and warnings
+type Logger interface {
+ // Printf formats and logs a message
+ Printf(string, ...interface{})
+}
+
+// Options are the options for creating a new JSONClient.
+type Options struct {
+ // Interface to use for logging warnings and errors, if nil the
+ // standard library log package will be used.
+ Logger Logger
+ // PEM format public key to use for signature verification.
+ PublicKey string
+ // DER format public key to use for signature verification.
+ PublicKeyDER []byte
+ // UserAgent, if set, will be sent as the User-Agent header with each request.
+ UserAgent string
+ // If set, this is sent as the Authorization header with each request.
+ Authorization string
+}
+
+// ParsePublicKey parses and returns the public key contained in opts.
+// If both opts.PublicKey and opts.PublicKeyDER are set, PublicKeyDER is used.
+// If neither is set, nil will be returned.
+func (opts *Options) ParsePublicKey() (crypto.PublicKey, error) {
+ if len(opts.PublicKeyDER) > 0 {
+ return x509.ParsePKIXPublicKey(opts.PublicKeyDER)
+ }
+
+ if opts.PublicKey != "" {
+ pubkey, _ /* keyhash */, rest, err := ct.PublicKeyFromPEM([]byte(opts.PublicKey))
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, errors.New("extra data found after PEM key decoded")
+ }
+ return pubkey, nil
+ }
+
+ return nil, nil
+}
+
+type basicLogger struct{}
+
+func (bl *basicLogger) Printf(msg string, args ...interface{}) {
+ log.Printf(msg, args...)
+}
+
+// RspError represents an error that occurred when processing a response from a server,
+// and also includes key details from the http.Response that triggered the error.
+type RspError struct {
+ Err error
+ StatusCode int
+ Body []byte
+}
+
+// Error formats the RspError instance, focusing on the error.
+func (e RspError) Error() string {
+ return e.Err.Error()
+}
+
+// New constructs a new JSONClient instance, for the given base URI, using the
+// given http.Client object (if provided) and the Options object.
+// If opts does not specify a public key, signatures will not be verified.
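+//
+// For example (a sketch; the URI and PEM key are illustrative placeholders):
+//
+//	client, err := jsonclient.New("https://ct.example.com", nil, jsonclient.Options{
+//		PublicKey: pemKey,
+//		UserAgent: "my-ct-tool/1.0",
+//	})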
+func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) {
+ pubkey, err := opts.ParsePublicKey()
+ if err != nil {
+ return nil, fmt.Errorf("invalid public key: %v", err)
+ }
+
+ var verifier *ct.SignatureVerifier
+ if pubkey != nil {
+ var err error
+ verifier, err = ct.NewSignatureVerifier(pubkey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if hc == nil {
+ hc = new(http.Client)
+ }
+ logger := opts.Logger
+ if logger == nil {
+ logger = &basicLogger{}
+ }
+ return &JSONClient{
+ uri: strings.TrimRight(uri, "/"),
+ httpClient: hc,
+ Verifier: verifier,
+ logger: logger,
+ backoff: &backoff{},
+ userAgent: opts.UserAgent,
+ authorization: opts.Authorization,
+ }, nil
+}
+
+// BaseURI returns the base URI that the JSONClient makes queries to.
+func (c *JSONClient) BaseURI() string {
+ return c.uri
+}
+
+// GetAndParse makes an HTTP GET call to the given path, and attempts to parse
+// the response as a JSON representation of the rsp structure. Returns the
+// http.Response, the body of the response, and an error (which may be of
+// type RspError if the HTTP response was available). It returns an error
+// if the response status code is not 200 OK.
+func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) {
+ if ctx == nil {
+ return nil, nil, errors.New("context.Context required")
+ }
+ // Build a GET request with URL-encoded parameters.
+ vals := url.Values{}
+ for k, v := range params {
+ vals.Add(k, v)
+ }
+ fullURI := fmt.Sprintf("%s%s?%s", c.uri, path, vals.Encode())
+ httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, fullURI, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(c.userAgent) != 0 {
+ httpReq.Header.Set("User-Agent", c.userAgent)
+ }
+ if len(c.authorization) != 0 {
+ httpReq.Header.Add("Authorization", c.authorization)
+ }
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return nil, nil, err
+ }
+ body, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return nil, nil, RspError{Err: fmt.Errorf("failed to read response body: %w", err), StatusCode: httpRsp.StatusCode, Body: body}
+ }
+ if err := httpRsp.Body.Close(); err != nil {
+ return nil, nil, RspError{Err: fmt.Errorf("failed to close response body: %w", err), StatusCode: httpRsp.StatusCode, Body: body}
+ }
+ if httpRsp.StatusCode != http.StatusOK {
+ return nil, nil, RspError{Err: fmt.Errorf("got HTTP Status %q", httpRsp.Status), StatusCode: httpRsp.StatusCode, Body: body}
+ }
+
+ if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil {
+ return nil, nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+ }
+
+ return httpRsp, body, nil
+}
+
+// PostAndParse makes an HTTP POST call to the given path, including the request
+// parameters, and attempts to parse the response as a JSON representation of
+// the rsp structure. Returns the http.Response, the body of the response, and
+// an error (which may be of type RspError if the HTTP response was available).
+// It does NOT return an error if the response status code is not 200 OK.
+func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
+ if ctx == nil {
+ return nil, nil, errors.New("context.Context required")
+ }
+ // Build a POST request with JSON body.
+ postBody, err := json.Marshal(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ fullURI := fmt.Sprintf("%s%s", c.uri, path)
+ httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURI, bytes.NewReader(postBody))
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(c.userAgent) != 0 {
+ httpReq.Header.Set("User-Agent", c.userAgent)
+ }
+ if len(c.authorization) != 0 {
+ httpReq.Header.Add("Authorization", c.authorization)
+ }
+ httpReq.Header.Set("Content-Type", "application/json")
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return nil, nil, err
+ }
+ body, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ _ = httpRsp.Body.Close()
+ return nil, nil, err
+ }
+ if err := httpRsp.Body.Close(); err != nil {
+ return nil, nil, err
+ }
+ if httpRsp.Request.Method != http.MethodPost {
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections#permanent_redirections
+ return nil, nil, fmt.Errorf("POST request to %q was converted to %s request to %q", fullURI, httpRsp.Request.Method, httpRsp.Request.URL)
+ }
+
+ if httpRsp.StatusCode == http.StatusOK {
+ if err := json.Unmarshal(body, &rsp); err != nil {
+ return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err}
+ }
+ }
+ return httpRsp, body, nil
+}
+
+// waitForBackoff blocks until the backoff interval has elapsed or the context has
+// expired; if the backoff's not-before time is already in the past, it returns immediately.
+func (c *JSONClient) waitForBackoff(ctx context.Context) error {
+ dur := time.Until(c.backoff.until().Add(time.Millisecond * time.Duration(rand.Intn(int(maxJitter.Seconds()*1000)))))
+ if dur < 0 {
+ dur = 0
+ }
+ backoffTimer := time.NewTimer(dur)
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-backoffTimer.C:
+ }
+ return nil
+}
+
+// PostAndParseWithRetry makes an HTTP POST call, but retries (with backoff) on
+// retryable errors; the caller should set a deadline on the provided context
+// to prevent infinite retries. Return values are as for PostAndParse.
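+//
+// For example (a sketch; the path and request/response values are illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	rsp, body, err := c.PostAndParseWithRetry(ctx, "/ct/v1/add-chain", req, &result)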
+func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
+ if ctx == nil {
+ return nil, nil, errors.New("context.Context required")
+ }
+ for {
+ httpRsp, body, err := c.PostAndParse(ctx, path, req, rsp)
+ if err != nil {
+ // Don't retry context errors.
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, nil, err
+ }
+ wait := c.backoff.set(nil)
+ c.logger.Printf("Request to %s failed, backing-off %s: %s", c.uri, wait, err)
+ } else {
+ switch httpRsp.StatusCode {
+ case http.StatusOK:
+ return httpRsp, body, nil
+ case http.StatusRequestTimeout:
+ // Request timeout, retry immediately
+ c.logger.Printf("Request to %s timed out, retrying immediately", c.uri)
+ case http.StatusServiceUnavailable:
+ fallthrough
+ case http.StatusTooManyRequests:
+ var backoff *time.Duration
+ // Retry-After may be either a number of seconds as an int or an RFC 1123
+ // date string (RFC 7231 Section 7.1.3)
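+ // e.g. "Retry-After: 120" or
+ // "Retry-After: Fri, 31 Dec 1999 23:59:59 GMT".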
+ if retryAfter := httpRsp.Header.Get("Retry-After"); retryAfter != "" {
+ if seconds, err := strconv.Atoi(retryAfter); err == nil {
+ b := time.Duration(seconds) * time.Second
+ backoff = &b
+ } else if date, err := time.Parse(time.RFC1123, retryAfter); err == nil {
+ b := time.Until(date)
+ backoff = &b
+ }
+ }
+ wait := c.backoff.set(backoff)
+ c.logger.Printf("Request to %s failed, backing-off for %s: got HTTP status %s", c.uri, wait, httpRsp.Status)
+ default:
+ return nil, nil, RspError{
+ StatusCode: httpRsp.StatusCode,
+ Body: body,
+ Err: fmt.Errorf("got HTTP status %q", httpRsp.Status)}
+ }
+ }
+ if err := c.waitForBackoff(ctx); err != nil {
+ return nil, nil, err
+ }
+ }
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go
new file mode 100644
index 00000000000..9ac54bae912
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logfilter.go
@@ -0,0 +1,129 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package loglist3
+
+import (
+ "github.com/google/certificate-transparency-go/x509"
+ "github.com/google/certificate-transparency-go/x509util"
+)
+
+// LogRoots maps Log-URLs (stated at LogList) to the pools of their accepted
+// root-certificates.
+type LogRoots map[string]*x509util.PEMCertPool
+
+// Compatible creates a new LogList containing only Logs matching the temporal
+// and root-acceptance conditions.
+func (ll *LogList) Compatible(cert *x509.Certificate, certRoot *x509.Certificate, roots LogRoots) LogList {
+ active := ll.TemporallyCompatible(cert)
+ // Do not check root compatibility if no certRoot is provided.
+ if certRoot == nil {
+ return active
+ }
+ return active.RootCompatible(certRoot, roots)
+}
+
+// SelectByStatus creates a new LogList containing only logs with status
+// provided from the original.
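+//
+// For example, to keep only logs currently marked usable (a sketch):
+//
+//	usable := ll.SelectByStatus([]loglist3.LogStatus{loglist3.UsableLogStatus})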
+func (ll *LogList) SelectByStatus(lstats []LogStatus) LogList {
+ var active LogList
+ for _, op := range ll.Operators {
+ activeOp := *op
+ activeOp.Logs = []*Log{}
+ for _, l := range op.Logs {
+ for _, lstat := range lstats {
+ if l.State.LogStatus() == lstat {
+ activeOp.Logs = append(activeOp.Logs, l)
+ break
+ }
+ }
+ }
+ if len(activeOp.Logs) > 0 {
+ active.Operators = append(active.Operators, &activeOp)
+ }
+ }
+ return active
+}
+
+// RootCompatible creates a new LogList containing only the logs of original
+// LogList that are compatible with the provided cert, according to
+// the passed in collection of per-log roots. Logs that are missing from
+// the collection are treated as always compatible and included, even if
+// an empty cert root is passed in.
+// certRoot, when provided, is expected to be a CA certificate.
+func (ll *LogList) RootCompatible(certRoot *x509.Certificate, roots LogRoots) LogList {
+ var compatible LogList
+
+ // Check whether root is a CA-cert.
+ if certRoot != nil && !certRoot.IsCA {
+ // The provided certRoot is not a CA certificate, so root compatibility
+ // cannot be established; return the empty list.
+ return compatible
+ }
+
+ for _, op := range ll.Operators {
+ compatibleOp := *op
+ compatibleOp.Logs = []*Log{}
+ for _, l := range op.Logs {
+ // If root set is not defined, we treat Log as compatible assuming no
+ // knowledge of its roots.
+ if _, ok := roots[l.URL]; !ok {
+ compatibleOp.Logs = append(compatibleOp.Logs, l)
+ continue
+ }
+
+ if certRoot == nil {
+ continue
+ }
+
+ // Check root is accepted.
+ if roots[l.URL].Included(certRoot) {
+ compatibleOp.Logs = append(compatibleOp.Logs, l)
+ }
+ }
+ if len(compatibleOp.Logs) > 0 {
+ compatible.Operators = append(compatible.Operators, &compatibleOp)
+ }
+ }
+ return compatible
+}
+
+// TemporallyCompatible creates a new LogList containing only the logs of
+// original LogList that are compatible with the provided cert, according to
+// NotAfter and TemporalInterval matching.
+// Returns an empty LogList if a nil cert is provided.
+func (ll *LogList) TemporallyCompatible(cert *x509.Certificate) LogList {
+ var compatible LogList
+ if cert == nil {
+ return compatible
+ }
+
+ for _, op := range ll.Operators {
+ compatibleOp := *op
+ compatibleOp.Logs = []*Log{}
+ for _, l := range op.Logs {
+ if l.TemporalInterval == nil {
+ compatibleOp.Logs = append(compatibleOp.Logs, l)
+ continue
+ }
+ if cert.NotAfter.Before(l.TemporalInterval.EndExclusive) && (cert.NotAfter.After(l.TemporalInterval.StartInclusive) || cert.NotAfter.Equal(l.TemporalInterval.StartInclusive)) {
+ compatibleOp.Logs = append(compatibleOp.Logs, l)
+ }
+ }
+ if len(compatibleOp.Logs) > 0 {
+ compatible.Operators = append(compatible.Operators, &compatibleOp)
+ }
+ }
+ return compatible
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go
new file mode 100644
index 00000000000..c5e94f1874f
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/loglist3/loglist3.go
@@ -0,0 +1,427 @@
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package loglist3 allows parsing and searching of the master CT Log list.
+// It expects the log list to conform to the v3 schema.
+package loglist3
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "time"
+ "unicode"
+
+ "github.com/google/certificate-transparency-go/tls"
+)
+
+const (
+ // LogListURL has the master URL for Google Chrome's log list.
+ LogListURL = "https://www.gstatic.com/ct/log_list/v3/log_list.json"
+ // LogListSignatureURL has the URL for the signature over Google Chrome's log list.
+ LogListSignatureURL = "https://www.gstatic.com/ct/log_list/v3/log_list.sig"
+ // AllLogListURL has the URL for the list of all known logs.
+ AllLogListURL = "https://www.gstatic.com/ct/log_list/v3/all_logs_list.json"
+ // AllLogListSignatureURL has the URL for the signature over the list of all known logs.
+ AllLogListSignatureURL = "https://www.gstatic.com/ct/log_list/v3/all_logs_list.sig"
+)
+
+// Manually mapped from https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
+
+// LogList holds a collection of CT logs, grouped by operator.
+type LogList struct {
+ // IsAllLogs is set to true if the list contains all known logs, not
+ // only usable ones.
+ IsAllLogs bool `json:"is_all_logs,omitempty"`
+ // Version is the version of the log list.
+ Version string `json:"version,omitempty"`
+ // LogListTimestamp is the time at which the log list was published.
+ LogListTimestamp time.Time `json:"log_list_timestamp,omitempty"`
+ // Operators is a list of CT log operators and the logs they operate.
+ Operators []*Operator `json:"operators"`
+}
+
+// Operator holds a collection of CT logs run by the same organisation.
+// It also provides information about that organisation, e.g. contact details.
+type Operator struct {
+ // Name is the name of the CT log operator.
+ Name string `json:"name"`
+ // Email lists the email addresses that can be used to contact this log
+ // operator.
+ Email []string `json:"email"`
+ // Logs is a list of RFC 6962 CT logs run by this operator.
+ Logs []*Log `json:"logs"`
+ // TiledLogs is a list of Static CT API CT logs run by this operator.
+ TiledLogs []*TiledLog `json:"tiled_logs"`
+}
+
+// Log describes a single RFC 6962 CT log. It is nearly the same as the TiledLog struct,
+// but has a single URL field instead of SubmissionURL and MonitoringURL fields.
+type Log struct {
+ // Description is a human-readable string that describes the log.
+ Description string `json:"description,omitempty"`
+ // LogID is the SHA-256 hash of the log's public key.
+ LogID []byte `json:"log_id"`
+ // Key is the public key with which signatures can be verified.
+ Key []byte `json:"key"`
+ // URL is the address of the HTTPS API.
+ URL string `json:"url"`
+ // DNS is the address of the DNS API.
+ DNS string `json:"dns,omitempty"`
+ // MMD is the Maximum Merge Delay, in seconds. All submitted
+ // certificates must be incorporated into the log within this time.
+ MMD int32 `json:"mmd"`
+ // PreviousOperators is a list of previous operators and the timestamp
+ // of when they stopped running the log.
+ PreviousOperators []*PreviousOperator `json:"previous_operators,omitempty"`
+ // State is the current state of the log, from the perspective of the
+ // log list distributor.
+ State *LogStates `json:"state,omitempty"`
+ // TemporalInterval, if set, indicates that this log only accepts
+ // certificates with a NotAfter date in this time range.
+ TemporalInterval *TemporalInterval `json:"temporal_interval,omitempty"`
+ // Type indicates the purpose of this log, e.g. "test" or "prod".
+ Type string `json:"log_type,omitempty"`
+}
+
+// TiledLog describes a Static CT API log. It is nearly the same as the Log struct,
+// but has both SubmissionURL and MonitoringURL fields instead of a single URL field.
+type TiledLog struct {
+ // Description is a human-readable string that describes the log.
+ Description string `json:"description,omitempty"`
+ // LogID is the SHA-256 hash of the log's public key.
+ LogID []byte `json:"log_id"`
+ // Key is the public key with which signatures can be verified.
+ Key []byte `json:"key"`
+ // SubmissionURL is the URL to which entries are submitted.
+ SubmissionURL string `json:"submission_url"`
+ // MonitoringURL is the URL from which the log can be monitored.
+ MonitoringURL string `json:"monitoring_url"`
+ // DNS is the address of the DNS API.
+ DNS string `json:"dns,omitempty"`
+ // MMD is the Maximum Merge Delay, in seconds. All submitted
+ // certificates must be incorporated into the log within this time.
+ MMD int32 `json:"mmd"`
+ // PreviousOperators is a list of previous operators and the timestamp
+ // of when they stopped running the log.
+ PreviousOperators []*PreviousOperator `json:"previous_operators,omitempty"`
+ // State is the current state of the log, from the perspective of the
+ // log list distributor.
+ State *LogStates `json:"state,omitempty"`
+ // TemporalInterval, if set, indicates that this log only accepts
+ // certificates with a NotAfter date in this time range.
+ TemporalInterval *TemporalInterval `json:"temporal_interval,omitempty"`
+ // Type indicates the purpose of this log, e.g. "test" or "prod".
+ Type string `json:"log_type,omitempty"`
+}
+
+// PreviousOperator holds information about a log operator and the time at which
+// they stopped running a log.
+type PreviousOperator struct {
+ // Name is the name of the CT log operator.
+ Name string `json:"name"`
+ // EndTime is the time at which the operator stopped running a log.
+ EndTime time.Time `json:"end_time"`
+}
+
+// TemporalInterval is a time range.
+type TemporalInterval struct {
+ // StartInclusive is the beginning of the time range.
+ StartInclusive time.Time `json:"start_inclusive"`
+ // EndExclusive is just after the end of the time range.
+ EndExclusive time.Time `json:"end_exclusive"`
+}
+
+// LogStatus indicates Log status.
+type LogStatus int
+
+// LogStatus values
+const (
+ UndefinedLogStatus LogStatus = iota
+ PendingLogStatus
+ QualifiedLogStatus
+ UsableLogStatus
+ ReadOnlyLogStatus
+ RetiredLogStatus
+ RejectedLogStatus
+)
+
+//go:generate stringer -type=LogStatus
+
+// LogStates are the states that a CT log can be in, from the perspective of a
+// user agent. Only one should be set - this is the current state.
+type LogStates struct {
+ // Pending indicates that the log is in the "pending" state.
+ Pending *LogState `json:"pending,omitempty"`
+ // Qualified indicates that the log is in the "qualified" state.
+ Qualified *LogState `json:"qualified,omitempty"`
+ // Usable indicates that the log is in the "usable" state.
+ Usable *LogState `json:"usable,omitempty"`
+ // ReadOnly indicates that the log is in the "readonly" state.
+ ReadOnly *ReadOnlyLogState `json:"readonly,omitempty"`
+ // Retired indicates that the log is in the "retired" state.
+ Retired *LogState `json:"retired,omitempty"`
+ // Rejected indicates that the log is in the "rejected" state.
+ Rejected *LogState `json:"rejected,omitempty"`
+}
+
+// LogState contains details on the current state of a CT log.
+type LogState struct {
+ // Timestamp is the time when the state began.
+ Timestamp time.Time `json:"timestamp"`
+}
+
+// ReadOnlyLogState contains details on the current state of a read-only CT log.
+type ReadOnlyLogState struct {
+ LogState
+ // FinalTreeHead is the root hash and tree size at which the CT log was
+ // made read-only. This should never change while the log is read-only.
+ FinalTreeHead TreeHead `json:"final_tree_head"`
+}
+
+// TreeHead is the root hash and tree size of a CT log.
+type TreeHead struct {
+ // SHA256RootHash is the root hash of the CT log's Merkle tree.
+ SHA256RootHash []byte `json:"sha256_root_hash"`
+ // TreeSize is the size of the CT log's Merkle tree.
+ TreeSize int64 `json:"tree_size"`
+}
+
+// LogStatus returns the LogStatus enum value corresponding to the state that is set.
+func (ls *LogStates) LogStatus() LogStatus {
+ switch {
+ case ls == nil:
+ return UndefinedLogStatus
+ case ls.Pending != nil:
+ return PendingLogStatus
+ case ls.Qualified != nil:
+ return QualifiedLogStatus
+ case ls.Usable != nil:
+ return UsableLogStatus
+ case ls.ReadOnly != nil:
+ return ReadOnlyLogStatus
+ case ls.Retired != nil:
+ return RetiredLogStatus
+ case ls.Rejected != nil:
+ return RejectedLogStatus
+ default:
+ return UndefinedLogStatus
+ }
+}
+
+// String returns the printable name of the state.
+func (ls *LogStates) String() string {
+ return ls.LogStatus().String()
+}
+
+// Active returns the state that is set. If multiple states are set (which is not expected), it returns the first of them in the order below.
+func (ls *LogStates) Active() (*LogState, *ReadOnlyLogState) {
+ if ls == nil {
+ return nil, nil
+ }
+ switch {
+ case ls.Pending != nil:
+ return ls.Pending, nil
+ case ls.Qualified != nil:
+ return ls.Qualified, nil
+ case ls.Usable != nil:
+ return ls.Usable, nil
+ case ls.ReadOnly != nil:
+ return nil, ls.ReadOnly
+ case ls.Retired != nil:
+ return ls.Retired, nil
+ case ls.Rejected != nil:
+ return ls.Rejected, nil
+ default:
+ return nil, nil
+ }
+}
+
+// GoogleOperated returns whether Operator is considered to be Google.
+func (op *Operator) GoogleOperated() bool {
+ for _, email := range op.Email {
+ if strings.Contains(email, "google-ct-logs@googlegroups") {
+ return true
+ }
+ }
+ return false
+}
+
+// NewFromJSON creates a LogList from JSON encoded data.
+func NewFromJSON(llData []byte) (*LogList, error) {
+ var ll LogList
+ if err := json.Unmarshal(llData, &ll); err != nil {
+ return nil, fmt.Errorf("failed to parse log list: %v", err)
+ }
+ return &ll, nil
+}
+
+// NewFromSignedJSON creates a LogList from JSON encoded data, checking a
+// signature along the way. The signature data should be provided as the
+// raw signature data.
+func NewFromSignedJSON(llData, rawSig []byte, pubKey crypto.PublicKey) (*LogList, error) {
+ var sigAlgo tls.SignatureAlgorithm
+ switch pkType := pubKey.(type) {
+ case *rsa.PublicKey:
+ sigAlgo = tls.RSA
+ case *ecdsa.PublicKey:
+ sigAlgo = tls.ECDSA
+ default:
+ return nil, fmt.Errorf("unsupported public key type %v", pkType)
+ }
+ tlsSig := tls.DigitallySigned{
+ Algorithm: tls.SignatureAndHashAlgorithm{
+ Hash: tls.SHA256,
+ Signature: sigAlgo,
+ },
+ Signature: rawSig,
+ }
+ if err := tls.VerifySignature(pubKey, llData, tlsSig); err != nil {
+ return nil, fmt.Errorf("failed to verify signature: %v", err)
+ }
+ return NewFromJSON(llData)
+}
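+
+// A minimal verification sketch (illustrative only; llData and sigData are
+// assumed to have been fetched from LogListURL and LogListSignatureURL, and
+// pemKey to hold the list operator's public key in PEM form):
+//
+//	pubKey, _, _, err := ct.PublicKeyFromPEM(pemKey)
+//	if err != nil {
+//		return err
+//	}
+//	ll, err := loglist3.NewFromSignedJSON(llData, sigData, pubKey)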
+
+// FindLogByName returns all logs whose names contain the given string.
+func (ll *LogList) FindLogByName(name string) []*Log {
+ name = strings.ToLower(name)
+ var results []*Log
+ for _, op := range ll.Operators {
+ for _, log := range op.Logs {
+ if strings.Contains(strings.ToLower(log.Description), name) {
+ results = append(results, log)
+ }
+ }
+ }
+ return results
+}
+
+// FindLogByURL finds the log with the given URL.
+func (ll *LogList) FindLogByURL(url string) *Log {
+ for _, op := range ll.Operators {
+ for _, log := range op.Logs {
+ // Don't count trailing slashes
+ if strings.TrimRight(log.URL, "/") == strings.TrimRight(url, "/") {
+ return log
+ }
+ }
+ }
+ return nil
+}
+
+// FindLogByKeyHash finds the log with the given key hash.
+func (ll *LogList) FindLogByKeyHash(keyhash [sha256.Size]byte) *Log {
+ for _, op := range ll.Operators {
+ for _, log := range op.Logs {
+ if bytes.Equal(log.LogID, keyhash[:]) {
+ return log
+ }
+ }
+ }
+ return nil
+}
+
+// FindLogByKeyHashPrefix finds all logs whose key hash starts with the prefix.
+func (ll *LogList) FindLogByKeyHashPrefix(prefix string) []*Log {
+ var results []*Log
+ for _, op := range ll.Operators {
+ for _, log := range op.Logs {
+ hh := hex.EncodeToString(log.LogID[:])
+ if strings.HasPrefix(hh, prefix) {
+ results = append(results, log)
+ }
+ }
+ }
+ return results
+}
+
+// FindLogByKey finds the log with the given DER-encoded key.
+func (ll *LogList) FindLogByKey(key []byte) *Log {
+ for _, op := range ll.Operators {
+ for _, log := range op.Logs {
+ if bytes.Equal(log.Key[:], key) {
+ return log
+ }
+ }
+ }
+ return nil
+}
+
+var hexDigits = regexp.MustCompile("^[0-9a-fA-F]+$")
+
+// FuzzyFindLog tries to find logs that match the given input, whose format
+// is unspecified. This generally returns a single log, but if text input
+// matching multiple log descriptions is provided, multiple logs may be
+// returned.
+func (ll *LogList) FuzzyFindLog(input string) []*Log {
+ input = strings.Trim(input, " \t")
+ if logs := ll.FindLogByName(input); len(logs) > 0 {
+ return logs
+ }
+ if log := ll.FindLogByURL(input); log != nil {
+ return []*Log{log}
+ }
+ // Try assuming the input is binary data of some form. First base64:
+ if data, err := base64.StdEncoding.DecodeString(input); err == nil {
+ if len(data) == sha256.Size {
+ var hash [sha256.Size]byte
+ copy(hash[:], data)
+ if log := ll.FindLogByKeyHash(hash); log != nil {
+ return []*Log{log}
+ }
+ }
+ if log := ll.FindLogByKey(data); log != nil {
+ return []*Log{log}
+ }
+ }
+ // Now hex, but strip all internal whitespace first.
+ input = stripInternalSpace(input)
+ if data, err := hex.DecodeString(input); err == nil {
+ if len(data) == sha256.Size {
+ var hash [sha256.Size]byte
+ copy(hash[:], data)
+ if log := ll.FindLogByKeyHash(hash); log != nil {
+ return []*Log{log}
+ }
+ }
+ if log := ll.FindLogByKey(data); log != nil {
+ return []*Log{log}
+ }
+ }
+ // Finally, allow hex strings with an odd number of digits.
+ if hexDigits.MatchString(input) {
+ if logs := ll.FindLogByKeyHashPrefix(input); len(logs) > 0 {
+ return logs
+ }
+ }
+
+ return nil
+}
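+
+// For example, all of the following lookups are accepted (an illustrative
+// sketch; the URL and hex prefix here are hypothetical values):
+//
+//	byName := ll.FuzzyFindLog("argon")                      // description substring
+//	byURL := ll.FuzzyFindLog("https://ct.example.com/log")  // log URL
+//	byHash := ll.FuzzyFindLog("b0cc83e5")                   // key-hash hex prefix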
+
+func stripInternalSpace(input string) string {
+ return strings.Map(func(r rune) rune {
+ if !unicode.IsSpace(r) {
+ return r
+ }
+ return -1
+ }, input)
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go
new file mode 100644
index 00000000000..84c7bbdf4e4
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/loglist3/logstatus_string.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type=LogStatus"; DO NOT EDIT.
+
+package loglist3
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[UndefinedLogStatus-0]
+ _ = x[PendingLogStatus-1]
+ _ = x[QualifiedLogStatus-2]
+ _ = x[UsableLogStatus-3]
+ _ = x[ReadOnlyLogStatus-4]
+ _ = x[RetiredLogStatus-5]
+ _ = x[RejectedLogStatus-6]
+}
+
+const _LogStatus_name = "UndefinedLogStatusPendingLogStatusQualifiedLogStatusUsableLogStatusReadOnlyLogStatusRetiredLogStatusRejectedLogStatus"
+
+var _LogStatus_index = [...]uint8{0, 18, 34, 52, 67, 84, 100, 117}
+
+func (i LogStatus) String() string {
+ if i < 0 || i >= LogStatus(len(_LogStatus_index)-1) {
+ return "LogStatus(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _LogStatus_name[_LogStatus_index[i]:_LogStatus_index[i+1]]
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/proto_gen.go b/vendor/github.com/google/certificate-transparency-go/proto_gen.go
new file mode 100644
index 00000000000..565c6bbbc82
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/proto_gen.go
@@ -0,0 +1,25 @@
+// Copyright 2021 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ct
+
+// We do the protoc generation here (rather than in the individual directories)
+// in order to work around the newly-enforced rule that all protobuf file "names"
+// must be unique.
+// See https://developers.google.com/protocol-buffers/docs/proto#packages and
+// https://github.com/golang/protobuf/issues/1122
+
+//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/trillian) -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. trillian/ctfe/configpb/config.proto"
+//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/trillian) -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. trillian/migrillian/configpb/config.proto"
+//go:generate sh -c "protoc -I=. -I$(go list -f '{{ .Dir }}' github.com/google/certificate-transparency-go) --go_out=paths=source_relative:. client/configpb/multilog.proto"
diff --git a/vendor/github.com/google/certificate-transparency-go/serialization.go b/vendor/github.com/google/certificate-transparency-go/serialization.go
new file mode 100644
index 00000000000..2a6c21ed4cf
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/serialization.go
@@ -0,0 +1,317 @@
+// Copyright 2015 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ct
+
+import (
+ "crypto"
+ "crypto/sha256"
+ "fmt"
+ "time"
+
+ "github.com/google/certificate-transparency-go/tls"
+ "github.com/google/certificate-transparency-go/x509"
+)
+
+// SerializeSCTSignatureInput serializes the passed in sct and log entry into
+// the correct format for signing.
+func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
+ switch sct.SCTVersion {
+ case V1:
+ input := CertificateTimestamp{
+ SCTVersion: sct.SCTVersion,
+ SignatureType: CertificateTimestampSignatureType,
+ Timestamp: sct.Timestamp,
+ EntryType: entry.Leaf.TimestampedEntry.EntryType,
+ Extensions: sct.Extensions,
+ }
+ switch entry.Leaf.TimestampedEntry.EntryType {
+ case X509LogEntryType:
+ input.X509Entry = entry.Leaf.TimestampedEntry.X509Entry
+ case PrecertLogEntryType:
+ input.PrecertEntry = &PreCert{
+ IssuerKeyHash: entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
+ TBSCertificate: entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
+ }
+ default:
+ return nil, fmt.Errorf("unsupported entry type %s", entry.Leaf.TimestampedEntry.EntryType)
+ }
+ return tls.Marshal(input)
+ default:
+ return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
+ }
+}
+
+// SerializeSTHSignatureInput serializes the passed in STH into the correct
+// format for signing.
+func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
+ switch sth.Version {
+ case V1:
+ if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
+ return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
+ }
+
+ input := TreeHeadSignature{
+ Version: sth.Version,
+ SignatureType: TreeHashSignatureType,
+ Timestamp: sth.Timestamp,
+ TreeSize: sth.TreeSize,
+ SHA256RootHash: sth.SHA256RootHash,
+ }
+ return tls.Marshal(input)
+ default:
+ return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
+ }
+}
+
+// CreateX509MerkleTreeLeaf generates a MerkleTreeLeaf for an X509 cert
+func CreateX509MerkleTreeLeaf(cert ASN1Cert, timestamp uint64) *MerkleTreeLeaf {
+ return &MerkleTreeLeaf{
+ Version: V1,
+ LeafType: TimestampedEntryLeafType,
+ TimestampedEntry: &TimestampedEntry{
+ Timestamp: timestamp,
+ EntryType: X509LogEntryType,
+ X509Entry: &cert,
+ },
+ }
+}
+
+// MerkleTreeLeafFromRawChain generates a MerkleTreeLeaf from a chain (in DER-encoded form) and timestamp.
+func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
+ // Need at most the first 3 certs of the chain.
+ count := 3
+ if count > len(rawChain) {
+ count = len(rawChain)
+ }
+ chain := make([]*x509.Certificate, count)
+ for i := range chain {
+ cert, err := x509.ParseCertificate(rawChain[i].Data)
+ if x509.IsFatal(err) {
+ return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err)
+ }
+ chain[i] = cert
+ }
+ return MerkleTreeLeafFromChain(chain, etype, timestamp)
+}
+
+// MerkleTreeLeafFromChain generates a MerkleTreeLeaf from a chain and timestamp.
+func MerkleTreeLeafFromChain(chain []*x509.Certificate, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
+ leaf := MerkleTreeLeaf{
+ Version: V1,
+ LeafType: TimestampedEntryLeafType,
+ TimestampedEntry: &TimestampedEntry{
+ EntryType: etype,
+ Timestamp: timestamp,
+ },
+ }
+ if etype == X509LogEntryType {
+ leaf.TimestampedEntry.X509Entry = &ASN1Cert{Data: chain[0].Raw}
+ return &leaf, nil
+ }
+ if etype != PrecertLogEntryType {
+ return nil, fmt.Errorf("unknown LogEntryType %d", etype)
+ }
+
+ // Pre-certs are more complicated. First, parse the leaf pre-cert and its
+ // putative issuer.
+ if len(chain) < 2 {
+ return nil, fmt.Errorf("no issuer cert available for precert leaf building")
+ }
+ issuer := chain[1]
+ cert := chain[0]
+
+ var preIssuer *x509.Certificate
+ if IsPreIssuer(issuer) {
+ // Replace the cert's issuance information with details from the pre-issuer.
+ preIssuer = issuer
+
+ // The issuer of the pre-cert is not going to be the issuer of the final
+ // cert. Change to use the final issuer's key hash.
+ if len(chain) < 3 {
+ return nil, fmt.Errorf("no issuer cert available for pre-issuer")
+ }
+ issuer = chain[2]
+ }
+
+ // Next, post-process the DER-encoded TBSCertificate, to remove the CT poison
+ // extension and possibly update the issuer field.
+ defangedTBS, err := x509.BuildPrecertTBS(cert.RawTBSCertificate, preIssuer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to remove poison extension: %v", err)
+ }
+
+ leaf.TimestampedEntry.EntryType = PrecertLogEntryType
+ leaf.TimestampedEntry.PrecertEntry = &PreCert{
+ IssuerKeyHash: sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
+ TBSCertificate: defangedTBS,
+ }
+ return &leaf, nil
+}
+
+// MerkleTreeLeafForEmbeddedSCT generates a MerkleTreeLeaf from a chain and an
+// SCT timestamp, where the leaf certificate at chain[0] is a certificate that
+// contains embedded SCTs. It is assumed that the timestamp provided is from
+// one of the SCTs embedded within the leaf certificate.
+func MerkleTreeLeafForEmbeddedSCT(chain []*x509.Certificate, timestamp uint64) (*MerkleTreeLeaf, error) {
+ // For building the leaf for a certificate and SCT where the SCT is embedded
+ // in the certificate, we need to build the original precertificate TBS
+ // data. First, parse the leaf cert and its issuer.
+ if len(chain) < 2 {
+ return nil, fmt.Errorf("no issuer cert available for precert leaf building")
+ }
+ issuer := chain[1]
+ cert := chain[0]
+
+ // Next, post-process the DER-encoded TBSCertificate, to remove the SCTList
+ // extension.
+ tbs, err := x509.RemoveSCTList(cert.RawTBSCertificate)
+ if err != nil {
+ return nil, fmt.Errorf("failed to remove SCT List extension: %v", err)
+ }
+
+ return &MerkleTreeLeaf{
+ Version: V1,
+ LeafType: TimestampedEntryLeafType,
+ TimestampedEntry: &TimestampedEntry{
+ EntryType: PrecertLogEntryType,
+ Timestamp: timestamp,
+ PrecertEntry: &PreCert{
+ IssuerKeyHash: sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
+ TBSCertificate: tbs,
+ },
+ },
+ }, nil
+}
+
+// LeafHashForLeaf returns the leaf hash for a Merkle tree leaf.
+func LeafHashForLeaf(leaf *MerkleTreeLeaf) ([sha256.Size]byte, error) {
+ leafData, err := tls.Marshal(*leaf)
+ if err != nil {
+ return [sha256.Size]byte{}, fmt.Errorf("failed to tls-encode MerkleTreeLeaf: %s", err)
+ }
+
+ data := append([]byte{TreeLeafPrefix}, leafData...)
+ leafHash := sha256.Sum256(data)
+ return leafHash, nil
+}
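+
+// A sketch of computing the RFC 6962 leaf hash for a submitted chain
+// (illustrative only; chain and sct are assumed to come from the caller
+// and the log respectively):
+//
+//	leaf, err := MerkleTreeLeafFromChain(chain, X509LogEntryType, sct.Timestamp)
+//	if err != nil {
+//		return err
+//	}
+//	hash, err := LeafHashForLeaf(leaf) // SHA-256(0x00 || TLS-encoded leaf)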
+
+// IsPreIssuer indicates whether a certificate is a pre-cert issuer with the specific
+// certificate transparency extended key usage.
+func IsPreIssuer(issuer *x509.Certificate) bool {
+ for _, eku := range issuer.ExtKeyUsage {
+ if eku == x509.ExtKeyUsageCertificateTransparency {
+ return true
+ }
+ }
+ return false
+}
+
+// RawLogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
+// after JSON parsing) into a RawLogEntry object (i.e. a TLS-parsed structure).
+func RawLogEntryFromLeaf(index int64, entry *LeafEntry) (*RawLogEntry, error) {
+ ret := RawLogEntry{Index: index}
+ if rest, err := tls.Unmarshal(entry.LeafInput, &ret.Leaf); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err)
+ } else if len(rest) > 0 {
+ return nil, fmt.Errorf("MerkleTreeLeaf: trailing data %d bytes", len(rest))
+ }
+
+ switch eType := ret.Leaf.TimestampedEntry.EntryType; eType {
+ case X509LogEntryType:
+ var certChain CertificateChain
+ if rest, err := tls.Unmarshal(entry.ExtraData, &certChain); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal CertificateChain: %v", err)
+ } else if len(rest) > 0 {
+ return nil, fmt.Errorf("CertificateChain: trailing data %d bytes", len(rest))
+ }
+ ret.Cert = *ret.Leaf.TimestampedEntry.X509Entry
+ ret.Chain = certChain.Entries
+
+ case PrecertLogEntryType:
+ var precertChain PrecertChainEntry
+ if rest, err := tls.Unmarshal(entry.ExtraData, &precertChain); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry: %v", err)
+ } else if len(rest) > 0 {
+ return nil, fmt.Errorf("PrecertChainEntry: trailing data %d bytes", len(rest))
+ }
+ ret.Cert = precertChain.PreCertificate
+ ret.Chain = precertChain.CertificateChain
+
+ default:
+ // TODO(pavelkalinnikov): Section 4.6 of RFC6962 implies that unknown types
+ // are not errors. We should revisit how we process this case.
+ return nil, fmt.Errorf("unknown entry type: %v", eType)
+ }
+
+ return &ret, nil
+}
+
+// ToLogEntry converts RawLogEntry to a LogEntry, which includes an x509-parsed
+// (pre-)certificate.
+//
+// Note that this function may return a valid LogEntry object and a non-nil
+// error value, when the error indicates a non-fatal parsing error.
+func (rle *RawLogEntry) ToLogEntry() (*LogEntry, error) {
+ var err error
+ entry := LogEntry{Index: rle.Index, Leaf: rle.Leaf, Chain: rle.Chain}
+
+ switch eType := rle.Leaf.TimestampedEntry.EntryType; eType {
+ case X509LogEntryType:
+ entry.X509Cert, err = rle.Leaf.X509Certificate()
+ if x509.IsFatal(err) {
+ return nil, fmt.Errorf("failed to parse certificate: %v", err)
+ }
+
+ case PrecertLogEntryType:
+ var tbsCert *x509.Certificate
+ tbsCert, err = rle.Leaf.Precertificate()
+ if x509.IsFatal(err) {
+ return nil, fmt.Errorf("failed to parse precertificate: %v", err)
+ }
+ entry.Precert = &Precertificate{
+ Submitted: rle.Cert,
+ IssuerKeyHash: rle.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
+ TBSCertificate: tbsCert,
+ }
+
+ default:
+ return nil, fmt.Errorf("unknown entry type: %v", eType)
+ }
+
+ // err may be non-nil for a non-fatal error.
+ return &entry, err
+}
+
+// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
+// after JSON parsing) into a LogEntry object (which includes x509.Certificate
+// objects, after TLS and ASN.1 parsing).
+//
+// Note that this function may return a valid LogEntry object and a non-nil
+// error value, when the error indicates a non-fatal parsing error.
+func LogEntryFromLeaf(index int64, leaf *LeafEntry) (*LogEntry, error) {
+ rle, err := RawLogEntryFromLeaf(index, leaf)
+ if err != nil {
+ return nil, err
+ }
+ return rle.ToLogEntry()
+}
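+
+// A sketch of converting a get-entries response into parsed entries
+// (illustrative only; start and leafEntries are assumed to come from a
+// get-entries call):
+//
+//	for i := range leafEntries {
+//		entry, err := LogEntryFromLeaf(start+int64(i), &leafEntries[i])
+//		if x509.IsFatal(err) {
+//			return err
+//		}
+//		// entry.X509Cert or entry.Precert is set according to the entry type.
+//	}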
+
+// TimestampToTime converts a timestamp in the style of RFC 6962 (milliseconds
+// since UNIX epoch) to a Go Time.
+func TimestampToTime(ts uint64) time.Time {
+ secs := int64(ts / 1000)
+ msecs := int64(ts % 1000)
+ return time.Unix(secs, msecs*1000000)
+}
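+
+// For example, the timestamp 1512556025588 converts to
+// 2017-12-06 10:27:05.588 UTC:
+//
+//	t := TimestampToTime(1512556025588)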
diff --git a/vendor/github.com/google/certificate-transparency-go/signatures.go b/vendor/github.com/google/certificate-transparency-go/signatures.go
new file mode 100644
index 00000000000..b009008c6f5
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/signatures.go
@@ -0,0 +1,110 @@
+// Copyright 2015 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ct
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/pem"
+ "fmt"
+ "log"
+
+ "github.com/google/certificate-transparency-go/tls"
+ "github.com/google/certificate-transparency-go/x509"
+)
+
+// AllowVerificationWithNonCompliantKeys may be set to true in order to allow
+// SignatureVerifier to use keys which are technically non-compliant with
+// RFC6962.
+var AllowVerificationWithNonCompliantKeys = false
+
+// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error.
+func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
+ p, rest := pem.Decode(b)
+ if p == nil {
+ return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b))
+ }
+ k, err := x509.ParsePKIXPublicKey(p.Bytes)
+ return k, sha256.Sum256(p.Bytes), rest, err
+}
+
+// PublicKeyFromB64 parses a base64-encoded public key.
+func PublicKeyFromB64(b64PubKey string) (crypto.PublicKey, error) {
+ der, err := base64.StdEncoding.DecodeString(b64PubKey)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding public key: %s", err)
+ }
+ return x509.ParsePKIXPublicKey(der)
+}
+
+// SignatureVerifier can verify signatures on SCTs and STHs
+type SignatureVerifier struct {
+ PubKey crypto.PublicKey
+}
+
+// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
+func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
+ switch pkType := pk.(type) {
+ case *rsa.PublicKey:
+ if pkType.N.BitLen() < 2048 {
+ e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
+ if !AllowVerificationWithNonCompliantKeys {
+ return nil, e
+ }
+ log.Printf("WARNING: %v", e)
+ }
+ case *ecdsa.PublicKey:
+ params := *(pkType.Params())
+ if params != *elliptic.P256().Params() {
+ e := fmt.Errorf("public is ECDSA, but not on the P256 curve")
+ if !AllowVerificationWithNonCompliantKeys {
+ return nil, e
+ }
+ log.Printf("WARNING: %v", e)
+ }
+ default:
+ return nil, fmt.Errorf("unsupported public key type %v", pkType)
+ }
+
+ return &SignatureVerifier{PubKey: pk}, nil
+}
+
+// VerifySignature verifies that the given signature sig matches the data.
+func (s SignatureVerifier) VerifySignature(data []byte, sig tls.DigitallySigned) error {
+ return tls.VerifySignature(s.PubKey, data, sig)
+}
+
+// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry.
+func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error {
+ sctData, err := SerializeSCTSignatureInput(sct, entry)
+ if err != nil {
+ return err
+ }
+ return s.VerifySignature(sctData, tls.DigitallySigned(sct.Signature))
+}
+
+// VerifySTHSignature verifies that the STH's signature is valid.
+func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error {
+ sthData, err := SerializeSTHSignatureInput(sth)
+ if err != nil {
+ return err
+ }
+ return s.VerifySignature(sthData, tls.DigitallySigned(sth.TreeHeadSignature))
+}
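+
+// A minimal SCT-verification sketch (illustrative only; b64Key is assumed to
+// be the log's base64-encoded public key, and sct/entry to have been obtained
+// from the log):
+//
+//	pk, err := PublicKeyFromB64(b64Key)
+//	if err != nil {
+//		return err
+//	}
+//	v, err := NewSignatureVerifier(pk)
+//	if err != nil {
+//		return err
+//	}
+//	return v.VerifySCTSignature(sct, entry)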
diff --git a/vendor/github.com/google/certificate-transparency-go/tls/signature.go b/vendor/github.com/google/certificate-transparency-go/tls/signature.go
new file mode 100644
index 00000000000..bc174df2124
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/tls/signature.go
@@ -0,0 +1,152 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tls
+
+import (
+ "crypto"
+ "crypto/dsa" //nolint:staticcheck
+ "crypto/ecdsa"
+ _ "crypto/md5" // For registration side-effect
+ "crypto/rand"
+ "crypto/rsa"
+ _ "crypto/sha1" // For registration side-effect
+ _ "crypto/sha256" // For registration side-effect
+ _ "crypto/sha512" // For registration side-effect
+ "errors"
+ "fmt"
+ "log"
+ "math/big"
+
+ "github.com/google/certificate-transparency-go/asn1"
+)
+
+type dsaSig struct {
+ R, S *big.Int
+}
+
+func generateHash(algo HashAlgorithm, data []byte) ([]byte, crypto.Hash, error) {
+ var hashType crypto.Hash
+ switch algo {
+ case MD5:
+ hashType = crypto.MD5
+ case SHA1:
+ hashType = crypto.SHA1
+ case SHA224:
+ hashType = crypto.SHA224
+ case SHA256:
+ hashType = crypto.SHA256
+ case SHA384:
+ hashType = crypto.SHA384
+ case SHA512:
+ hashType = crypto.SHA512
+ default:
+ return nil, hashType, fmt.Errorf("unsupported Algorithm.Hash in signature: %v", algo)
+ }
+
+ hasher := hashType.New()
+ if _, err := hasher.Write(data); err != nil {
+ return nil, hashType, fmt.Errorf("failed to write to hasher: %v", err)
+ }
+ return hasher.Sum([]byte{}), hashType, nil
+}
+
+// VerifySignature verifies that the passed in signature over data was created by the given PublicKey.
+func VerifySignature(pubKey crypto.PublicKey, data []byte, sig DigitallySigned) error {
+ hash, hashType, err := generateHash(sig.Algorithm.Hash, data)
+ if err != nil {
+ return err
+ }
+
+ switch sig.Algorithm.Signature {
+ case RSA:
+ rsaKey, ok := pubKey.(*rsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("cannot verify RSA signature with %T key", pubKey)
+ }
+ if err := rsa.VerifyPKCS1v15(rsaKey, hashType, hash, sig.Signature); err != nil {
+ return fmt.Errorf("failed to verify rsa signature: %v", err)
+ }
+ case DSA:
+ dsaKey, ok := pubKey.(*dsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("cannot verify DSA signature with %T key", pubKey)
+ }
+ var dsaSig dsaSig
+ rest, err := asn1.Unmarshal(sig.Signature, &dsaSig)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal DSA signature: %v", err)
+ }
+ if len(rest) != 0 {
+ log.Printf("Garbage following signature %q", rest)
+ }
+ if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
+ return errors.New("DSA signature contained zero or negative values")
+ }
+ if !dsa.Verify(dsaKey, hash, dsaSig.R, dsaSig.S) {
+ return errors.New("failed to verify DSA signature")
+ }
+ case ECDSA:
+ ecdsaKey, ok := pubKey.(*ecdsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("cannot verify ECDSA signature with %T key", pubKey)
+ }
+ var ecdsaSig dsaSig
+ rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err)
+ }
+ if len(rest) != 0 {
+ log.Printf("Garbage following signature %q", rest)
+ }
+ if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+ return errors.New("ECDSA signature contained zero or negative values")
+ }
+
+ if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) {
+ return errors.New("failed to verify ECDSA signature")
+ }
+ default:
+ return fmt.Errorf("unsupported Algorithm.Signature in signature: %v", sig.Algorithm.Hash)
+ }
+ return nil
+}
+
+// CreateSignature builds a signature over the given data using the specified hash algorithm and private key.
+func CreateSignature(privKey crypto.PrivateKey, hashAlgo HashAlgorithm, data []byte) (DigitallySigned, error) {
+ var sig DigitallySigned
+ sig.Algorithm.Hash = hashAlgo
+ hash, hashType, err := generateHash(sig.Algorithm.Hash, data)
+ if err != nil {
+ return sig, err
+ }
+
+ switch privKey := privKey.(type) {
+ case rsa.PrivateKey:
+ sig.Algorithm.Signature = RSA
+ sig.Signature, err = rsa.SignPKCS1v15(rand.Reader, &privKey, hashType, hash)
+ return sig, err
+ case ecdsa.PrivateKey:
+ sig.Algorithm.Signature = ECDSA
+ var ecdsaSig dsaSig
+ ecdsaSig.R, ecdsaSig.S, err = ecdsa.Sign(rand.Reader, &privKey, hash)
+ if err != nil {
+ return sig, err
+ }
+ sig.Signature, err = asn1.Marshal(ecdsaSig)
+ return sig, err
+ default:
+ return sig, fmt.Errorf("unsupported private key type %T", privKey)
+ }
+}
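+
+// A round-trip sketch (illustrative only; key is assumed to be an
+// ecdsa.PrivateKey value and data the bytes being signed). Note that the
+// type switch above expects key types by value, not by pointer:
+//
+//	sig, err := CreateSignature(key, SHA256, data)
+//	if err != nil {
+//		return err
+//	}
+//	return VerifySignature(&key.PublicKey, data, sig)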
diff --git a/vendor/github.com/google/certificate-transparency-go/tls/tls.go b/vendor/github.com/google/certificate-transparency-go/tls/tls.go
new file mode 100644
index 00000000000..a48c998f48d
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/tls/tls.go
@@ -0,0 +1,711 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tls implements functionality for dealing with TLS-encoded data,
+// as defined in RFC 5246. This includes parsing and generation of TLS-encoded
+// data, together with utility functions for dealing with the DigitallySigned
+// TLS type.
+package tls
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// This file holds utility functions for TLS encoding/decoding data
+// as per RFC 5246 section 4.
+
+// A structuralError suggests that the TLS data is valid, but the Go type
+// which is receiving it doesn't match.
+type structuralError struct {
+ field string
+ msg string
+}
+
+func (e structuralError) Error() string {
+ var prefix string
+ if e.field != "" {
+ prefix = e.field + ": "
+ }
+ return "tls: structure error: " + prefix + e.msg
+}
+
+// A syntaxError suggests that the TLS data is invalid.
+type syntaxError struct {
+ field string
+ msg string
+}
+
+func (e syntaxError) Error() string {
+ var prefix string
+ if e.field != "" {
+ prefix = e.field + ": "
+ }
+ return "tls: syntax error: " + prefix + e.msg
+}
+
+// Uint24 is an unsigned 3-byte integer.
+type Uint24 uint32
+
+// Enum is an unsigned integer.
+type Enum uint64
+
+var (
+ uint8Type = reflect.TypeOf(uint8(0))
+ uint16Type = reflect.TypeOf(uint16(0))
+ uint24Type = reflect.TypeOf(Uint24(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ enumType = reflect.TypeOf(Enum(0))
+)
+
+// Unmarshal parses the TLS-encoded data in b and uses the reflect package to
+// fill in an arbitrary value pointed at by val. Because Unmarshal uses the
+// reflect package, the structs being written to must use exported fields
+// (upper case names).
+//
+// The mapping between TLS types and Go types is as follows; some fields
+// must have tags (to indicate their encoded size).
+//
+// TLS Go Required Tags
+// opaque byte / uint8
+// uint8 byte / uint8
+// uint16 uint16
+// uint24 tls.Uint24
+// uint32 uint32
+// uint64 uint64
+// enum tls.Enum size:S or maxval:N
+// Type []Type minlen:N,maxlen:M
+// opaque[N] [N]byte / [N]uint8
+// uint8[N] [N]byte / [N]uint8
+// struct { } struct { }
+// select(T) {
+// case e1: Type *T selector:Field,val:e1
+// }
+//
+// TLS variants (RFC 5246 s4.6.1) are only supported when the value of the
+// associated enumeration type is available earlier in the same enclosing
+// struct, and each possible variant is marked with a selector tag (to
+// indicate which field selects the variants) and a val tag (to indicate
+// what value of the selector picks this particular field).
+//
+// For example, a TLS structure:
+//
+// enum { e1(1), e2(2) } EnumType;
+// struct {
+// EnumType sel;
+// select(sel) {
+// case e1: uint16
+// case e2: uint32
+// } data;
+// } VariantItem;
+//
+// would have a corresponding Go type:
+//
+// type VariantItem struct {
+// Sel tls.Enum `tls:"maxval:2"`
+// Data16 *uint16 `tls:"selector:Sel,val:1"`
+// Data32 *uint32 `tls:"selector:Sel,val:2"`
+// }
+//
+// TLS fixed-length vectors of types other than opaque or uint8 are not supported.
+//
+// For TLS variable-length vectors that are themselves used in other vectors,
+// create a single-field structure to represent the inner type. For example, for:
+//
+// opaque InnerType<1..65535>;
+// struct {
+// InnerType inners<1..65535>;
+// } Something;
+//
+// convert to:
+//
+// type InnerType struct {
+// Val []byte `tls:"minlen:1,maxlen:65535"`
+// }
+// type Something struct {
+// Inners []InnerType `tls:"minlen:1,maxlen:65535"`
+// }
+//
+// If the encoded value does not fit in the Go type, Unmarshal returns a parse error.
+func Unmarshal(b []byte, val interface{}) ([]byte, error) {
+ return UnmarshalWithParams(b, val, "")
+}
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val interface{}, params string) ([]byte, error) {
+ info, err := fieldTagToFieldInfo(params, "")
+ if err != nil {
+ return nil, err
+ }
+ // The passed in interface{} is a pointer (to allow the value to be written
+ // to); extract the pointed-to object as a reflect.Value, so parseField
+ // can do various introspection things.
+ v := reflect.ValueOf(val).Elem()
+ offset, err := parseField(v, b, 0, info)
+ if err != nil {
+ return nil, err
+ }
+ return b[offset:], nil
+}
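+
+// For example, to parse a top-level opaque<0..2^16-1> vector (an
+// illustrative sketch; data holds the TLS-encoded bytes):
+//
+//	var val []byte
+//	rest, err := UnmarshalWithParams(data, &val, "minlen:0,maxlen:65535")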
+
+// byteCount returns the number of bytes needed to encode values up to (and including) x.
+func byteCount(x uint64) uint {
+ switch {
+ case x < 0x100:
+ return 1
+ case x < 0x10000:
+ return 2
+ case x < 0x1000000:
+ return 3
+ case x < 0x100000000:
+ return 4
+ case x < 0x10000000000:
+ return 5
+ case x < 0x1000000000000:
+ return 6
+ case x < 0x100000000000000:
+ return 7
+ default:
+ return 8
+ }
+}
+
+type fieldInfo struct {
+ count uint // Number of bytes
+ countSet bool
+ minlen uint64 // Only relevant for slices
+ maxlen uint64 // Only relevant for slices
+ selector string // Only relevant for select sub-values
+ val uint64 // Only relevant for select sub-values
+ name string // Used for better error messages
+}
+
+func (i *fieldInfo) fieldName() string {
+ if i == nil {
+ return ""
+ }
+ return i.name
+}
+
+// Given a tag string, return a fieldInfo describing the field.
+func fieldTagToFieldInfo(str string, name string) (*fieldInfo, error) {
+ var info *fieldInfo
+ // Iterate over clauses in the tag, ignoring any that don't parse properly.
+ for _, part := range strings.Split(str, ",") {
+ switch {
+ case strings.HasPrefix(part, "maxval:"):
+ if v, err := strconv.ParseUint(part[7:], 10, 64); err == nil {
+ info = &fieldInfo{count: byteCount(v), countSet: true}
+ }
+ case strings.HasPrefix(part, "size:"):
+ if sz, err := strconv.ParseUint(part[5:], 10, 32); err == nil {
+ info = &fieldInfo{count: uint(sz), countSet: true}
+ }
+ case strings.HasPrefix(part, "maxlen:"):
+ v, err := strconv.ParseUint(part[7:], 10, 64)
+ if err != nil {
+ continue
+ }
+ if info == nil {
+ info = &fieldInfo{}
+ }
+ info.count = byteCount(v)
+ info.countSet = true
+ info.maxlen = v
+ case strings.HasPrefix(part, "minlen:"):
+ v, err := strconv.ParseUint(part[7:], 10, 64)
+ if err != nil {
+ continue
+ }
+ if info == nil {
+ info = &fieldInfo{}
+ }
+ info.minlen = v
+ case strings.HasPrefix(part, "selector:"):
+ if info == nil {
+ info = &fieldInfo{}
+ }
+ info.selector = part[9:]
+ case strings.HasPrefix(part, "val:"):
+ v, err := strconv.ParseUint(part[4:], 10, 64)
+ if err != nil {
+ continue
+ }
+ if info == nil {
+ info = &fieldInfo{}
+ }
+ info.val = v
+ }
+ }
+ if info != nil {
+ info.name = name
+ if info.selector == "" {
+ if info.count < 1 {
+ return nil, structuralError{name, "field of unknown size in " + str}
+ } else if info.count > 8 {
+ return nil, structuralError{name, "specified size too large in " + str}
+ } else if info.minlen > info.maxlen {
+ return nil, structuralError{name, "specified length range inverted in " + str}
+ } else if info.val > 0 {
+ return nil, structuralError{name, "specified selector value but not field in " + str}
+ }
+ }
+ } else if name != "" {
+ info = &fieldInfo{name: name}
+ }
+ return info, nil
+}
+
+// Check that a value fits into a field described by a fieldInfo structure.
+func (i fieldInfo) check(val uint64, fldName string) error {
+ if val >= (1 << (8 * i.count)) {
+ return structuralError{fldName, fmt.Sprintf("value %d too large for size", val)}
+ }
+ if i.maxlen != 0 {
+ if val < i.minlen {
+ return structuralError{fldName, fmt.Sprintf("value %d too small for minimum %d", val, i.minlen)}
+ }
+ if val > i.maxlen {
+ return structuralError{fldName, fmt.Sprintf("value %d too large for maximum %d", val, i.maxlen)}
+ }
+ }
+ return nil
+}
+
+// readVarUint reads a big-endian unsigned integer of the given size in
+// bytes.
+func readVarUint(data []byte, info *fieldInfo) (uint64, error) {
+ if info == nil || !info.countSet {
+ return 0, structuralError{info.fieldName(), "no field size information available"}
+ }
+ if len(data) < int(info.count) {
+ return 0, syntaxError{info.fieldName(), "truncated variable-length integer"}
+ }
+ var result uint64
+ for i := uint(0); i < info.count; i++ {
+ result = (result << 8) | uint64(data[i])
+ }
+ if err := info.check(result, info.name); err != nil {
+ return 0, err
+ }
+ return result, nil
+}
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// (in bytes) into the data, it will try to parse a suitable TLS-encoded
+// value out and store it in the given Value.
+func parseField(v reflect.Value, data []byte, initOffset int, info *fieldInfo) (int, error) {
+ offset := initOffset
+ rest := data[offset:]
+
+ fieldType := v.Type()
+ // First look for known fixed types.
+ switch fieldType {
+ case uint8Type:
+ if len(rest) < 1 {
+ return offset, syntaxError{info.fieldName(), "truncated uint8"}
+ }
+ v.SetUint(uint64(rest[0]))
+ offset++
+ return offset, nil
+ case uint16Type:
+ if len(rest) < 2 {
+ return offset, syntaxError{info.fieldName(), "truncated uint16"}
+ }
+ v.SetUint(uint64(binary.BigEndian.Uint16(rest)))
+ offset += 2
+ return offset, nil
+ case uint24Type:
+ if len(rest) < 3 {
+ return offset, syntaxError{info.fieldName(), "truncated uint24"}
+ }
+ v.SetUint(uint64(rest[0])<<16 | uint64(rest[1])<<8 | uint64(rest[2]))
+ offset += 3
+ return offset, nil
+ case uint32Type:
+ if len(rest) < 4 {
+ return offset, syntaxError{info.fieldName(), "truncated uint32"}
+ }
+ v.SetUint(uint64(binary.BigEndian.Uint32(rest)))
+ offset += 4
+ return offset, nil
+ case uint64Type:
+ if len(rest) < 8 {
+ return offset, syntaxError{info.fieldName(), "truncated uint64"}
+ }
+ v.SetUint(uint64(binary.BigEndian.Uint64(rest)))
+ offset += 8
+ return offset, nil
+ }
+
+ // Now deal with user-defined types.
+ switch v.Kind() {
+ case enumType.Kind():
+ // Assume that anything of the same kind as Enum is an Enum, so that
+ // users can alias types of their own to Enum.
+ val, err := readVarUint(rest, info)
+ if err != nil {
+ return offset, err
+ }
+ v.SetUint(val)
+ offset += int(info.count)
+ return offset, nil
+ case reflect.Struct:
+ structType := fieldType
+ // TLS includes a select(Enum) {..} construct, where the value of an enum
+ // indicates which variant field is present (like a C union). We require
+ // that the enum value be an earlier field in the same structure (the selector),
+ // and that each of the possible variant destination fields be pointers.
+ // So the Go mapping looks like:
+ // type variantType struct {
+ // Which tls.Enum `tls:"size:1"` // this is the selector
+ // Val1 *type1 `tls:"selector:Which,val:1"` // this is a destination
+ // Val2 *type2 `tls:"selector:Which,val:2"` // this is a destination
+ // }
+
+ // To deal with this, we track any enum-like fields and their values...
+ enums := make(map[string]uint64)
+ // .. and we track which selector names we've seen (in the destination field tags),
+ // and whether a destination for that selector has been chosen.
+ selectorSeen := make(map[string]bool)
+ for i := 0; i < structType.NumField(); i++ {
+ // Find information about this field.
+ tag := structType.Field(i).Tag.Get("tls")
+ fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
+ if err != nil {
+ return offset, err
+ }
+
+ destination := v.Field(i)
+ if fieldInfo.selector != "" {
+ // This is a possible select(Enum) destination, so first check that the referenced
+ // selector field has already been seen earlier in the struct.
+ choice, ok := enums[fieldInfo.selector]
+ if !ok {
+ return offset, structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
+ }
+ if structType.Field(i).Type.Kind() != reflect.Ptr {
+ return offset, structuralError{fieldInfo.name, "choice field not a pointer type"}
+ }
+ // Is this the first mention of the selector field name? If so, remember it.
+ seen, ok := selectorSeen[fieldInfo.selector]
+ if !ok {
+ selectorSeen[fieldInfo.selector] = false
+ }
+ if choice != fieldInfo.val {
+ // This destination field was not the chosen one, so make it nil (we checked
+ // it was a pointer above).
+ v.Field(i).Set(reflect.Zero(structType.Field(i).Type))
+ continue
+ }
+ if seen {
+ // We already saw a different destination field receive the value for this
+ // selector value, which indicates a badly annotated structure.
+ return offset, structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
+ }
+ selectorSeen[fieldInfo.selector] = true
+ // Make an object of the pointed-to type and parse into that.
+ v.Field(i).Set(reflect.New(structType.Field(i).Type.Elem()))
+ destination = v.Field(i).Elem()
+ }
+ offset, err = parseField(destination, data, offset, fieldInfo)
+ if err != nil {
+ return offset, err
+ }
+
+ // Remember any possible tls.Enum values encountered in case they are selectors.
+ if structType.Field(i).Type.Kind() == enumType.Kind() {
+ enums[structType.Field(i).Name] = v.Field(i).Uint()
+ }
+ }
+
+ // Now we have seen all fields in the structure, check that all select(Enum) {..} selector
+ // fields found a destination to put their data in.
+ for selector, seen := range selectorSeen {
+ if !seen {
+ return offset, syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
+ }
+ }
+ return offset, nil
+ case reflect.Array:
+ datalen := v.Len()
+
+ if datalen > len(rest) {
+ return offset, syntaxError{info.fieldName(), "truncated array"}
+ }
+ inner := rest[:datalen]
+ offset += datalen
+ if fieldType.Elem().Kind() != reflect.Uint8 {
+ // Only byte/uint8 arrays are supported
+ return offset, structuralError{info.fieldName(), "unsupported array type: " + v.Type().String()}
+ }
+ reflect.Copy(v, reflect.ValueOf(inner))
+ return offset, nil
+
+ case reflect.Slice:
+ sliceType := fieldType
+ // Slices represent variable-length vectors, which are prefixed by a length field.
+ // The fieldInfo indicates the size of that length field.
+ varlen, err := readVarUint(rest, info)
+ if err != nil {
+ return offset, err
+ }
+ datalen := int(varlen)
+ offset += int(info.count)
+ rest = rest[info.count:]
+
+ if datalen > len(rest) {
+ return offset, syntaxError{info.fieldName(), "truncated slice"}
+ }
+ inner := rest[:datalen]
+ offset += datalen
+ if fieldType.Elem().Kind() == reflect.Uint8 {
+ // Fast version for []byte
+ v.Set(reflect.MakeSlice(sliceType, datalen, datalen))
+ reflect.Copy(v, reflect.ValueOf(inner))
+ return offset, nil
+ }
+
+ v.Set(reflect.MakeSlice(sliceType, 0, datalen))
+ single := reflect.New(sliceType.Elem())
+ for innerOffset := 0; innerOffset < len(inner); {
+ var err error
+ innerOffset, err = parseField(single.Elem(), inner, innerOffset, nil)
+ if err != nil {
+ return offset, err
+ }
+ v.Set(reflect.Append(v, single.Elem()))
+ }
+ return offset, nil
+
+ default:
+ return offset, structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
+ }
+}
+
+// Marshal returns the TLS encoding of val.
+func Marshal(val interface{}) ([]byte, error) {
+ return MarshalWithParams(val, "")
+}
+
+// MarshalWithParams returns the TLS encoding of val, and allows field
+// parameters to be specified for the top-level element. The form
+// of the params is the same as the field tags.
+func MarshalWithParams(val interface{}, params string) ([]byte, error) {
+ info, err := fieldTagToFieldInfo(params, "")
+ if err != nil {
+ return nil, err
+ }
+ var out bytes.Buffer
+ v := reflect.ValueOf(val)
+ if err := marshalField(&out, v, info); err != nil {
+ return nil, err
+ }
+ return out.Bytes(), err
+}
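+
+// A round-trip sketch (illustrative only), encoding a byte vector with a
+// one-byte length prefix:
+//
+//	enc, err := MarshalWithParams([]byte{0x01, 0x02}, "minlen:0,maxlen:255")
+//	// enc == []byte{0x02, 0x01, 0x02}: length prefix, then contents.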
+
+func marshalField(out *bytes.Buffer, v reflect.Value, info *fieldInfo) error {
+ var prefix string
+ if info != nil && len(info.name) > 0 {
+ prefix = info.name + ": "
+ }
+ fieldType := v.Type()
+ // First look for known fixed types.
+ switch fieldType {
+ case uint8Type:
+ out.WriteByte(byte(v.Uint()))
+ return nil
+ case uint16Type:
+ scratch := make([]byte, 2)
+ binary.BigEndian.PutUint16(scratch, uint16(v.Uint()))
+ out.Write(scratch)
+ return nil
+ case uint24Type:
+ i := v.Uint()
+ if i > 0xffffff {
+ return structuralError{info.fieldName(), fmt.Sprintf("uint24 overflow %d", i)}
+ }
+ scratch := make([]byte, 4)
+ binary.BigEndian.PutUint32(scratch, uint32(i))
+ out.Write(scratch[1:])
+ return nil
+ case uint32Type:
+ scratch := make([]byte, 4)
+ binary.BigEndian.PutUint32(scratch, uint32(v.Uint()))
+ out.Write(scratch)
+ return nil
+ case uint64Type:
+ scratch := make([]byte, 8)
+ binary.BigEndian.PutUint64(scratch, uint64(v.Uint()))
+ out.Write(scratch)
+ return nil
+ }
+
+ // Now deal with user-defined types.
+ switch v.Kind() {
+ case enumType.Kind():
+ i := v.Uint()
+ if info == nil {
+ return structuralError{info.fieldName(), "enum field tag missing"}
+ }
+ if err := info.check(i, prefix); err != nil {
+ return err
+ }
+ scratch := make([]byte, 8)
+ binary.BigEndian.PutUint64(scratch, uint64(i))
+ out.Write(scratch[(8 - info.count):])
+ return nil
+ case reflect.Struct:
+ structType := fieldType
+ enums := make(map[string]uint64) // Values of any Enum fields
+ // The comment on parseField() describes the mapping of the TLS select(Enum) {..} construct;
+ // here we have selector and source (rather than destination) fields.
+
+ // Track which selector names we've seen (in the source field tags), and whether a source
+ // value for that selector has been processed.
+ selectorSeen := make(map[string]bool)
+ for i := 0; i < structType.NumField(); i++ {
+ // Find information about this field.
+ tag := structType.Field(i).Tag.Get("tls")
+ fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
+ if err != nil {
+ return err
+ }
+
+ source := v.Field(i)
+ if fieldInfo.selector != "" {
+ // This field is a possible source for a select(Enum) {..}. First check
+ // the selector field name has been seen.
+ choice, ok := enums[fieldInfo.selector]
+ if !ok {
+ return structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
+ }
+ if structType.Field(i).Type.Kind() != reflect.Ptr {
+ return structuralError{fieldInfo.name, "choice field not a pointer type"}
+ }
+ // Is this the first mention of the selector field name? If so, remember it.
+ seen, ok := selectorSeen[fieldInfo.selector]
+ if !ok {
+ selectorSeen[fieldInfo.selector] = false
+ }
+ if choice != fieldInfo.val {
+ // This source was not chosen; police that it should be nil.
+ if v.Field(i).Pointer() != uintptr(0) {
+ return structuralError{fieldInfo.name, "unchosen field is non-nil"}
+ }
+ continue
+ }
+ if seen {
+ // We already saw a different source field generate the value for this
+ // selector value, which indicates a badly annotated structure.
+ return structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
+ }
+ selectorSeen[fieldInfo.selector] = true
+ if v.Field(i).Pointer() == uintptr(0) {
+ return structuralError{fieldInfo.name, "chosen field is nil"}
+ }
+ // Marshal from the pointed-to source object.
+ source = v.Field(i).Elem()
+ }
+
+ var fieldData bytes.Buffer
+ if err := marshalField(&fieldData, source, fieldInfo); err != nil {
+ return err
+ }
+ out.Write(fieldData.Bytes())
+
+ // Remember any tls.Enum values encountered in case they are selectors.
+ if structType.Field(i).Type.Kind() == enumType.Kind() {
+ enums[structType.Field(i).Name] = v.Field(i).Uint()
+ }
+ }
+ // Now we have seen all fields in the structure, check that all select(Enum) {..} selector
+ // fields found a source field to get their data from.
+ for selector, seen := range selectorSeen {
+ if !seen {
+ return structuralError{info.fieldName(), selector + ": unhandled value for selector"}
+ }
+ }
+ return nil
+
+ case reflect.Array:
+ datalen := v.Len()
+ arrayType := fieldType
+ if arrayType.Elem().Kind() != reflect.Uint8 {
+ // Only byte/uint8 arrays are supported
+ return structuralError{info.fieldName(), "unsupported array type"}
+ }
+ bytes := make([]byte, datalen)
+ for i := 0; i < datalen; i++ {
+ bytes[i] = uint8(v.Index(i).Uint())
+ }
+ _, err := out.Write(bytes)
+ return err
+
+ case reflect.Slice:
+ if info == nil {
+ return structuralError{info.fieldName(), "slice field tag missing"}
+ }
+
+ sliceType := fieldType
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ // Fast version for []byte: first write the length as info.count bytes.
+ datalen := v.Len()
+ scratch := make([]byte, 8)
+ binary.BigEndian.PutUint64(scratch, uint64(datalen))
+ out.Write(scratch[(8 - info.count):])
+
+ if err := info.check(uint64(datalen), prefix); err != nil {
+ return err
+ }
+ // Then just write the data.
+ bytes := make([]byte, datalen)
+ for i := 0; i < datalen; i++ {
+ bytes[i] = uint8(v.Index(i).Uint())
+ }
+ _, err := out.Write(bytes)
+ return err
+ }
+ // General version: use a separate Buffer to write the slice entries into.
+ var innerBuf bytes.Buffer
+ for i := 0; i < v.Len(); i++ {
+ if err := marshalField(&innerBuf, v.Index(i), nil); err != nil {
+ return err
+ }
+ }
+
+ // Now insert (and check) the size.
+ size := uint64(innerBuf.Len())
+ if err := info.check(size, prefix); err != nil {
+ return err
+ }
+ scratch := make([]byte, 8)
+ binary.BigEndian.PutUint64(scratch, size)
+ out.Write(scratch[(8 - info.count):])
+
+ // Then copy the data.
+ _, err := out.Write(innerBuf.Bytes())
+ return err
+
+ default:
+ return structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
+ }
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/tls/types.go b/vendor/github.com/google/certificate-transparency-go/tls/types.go
new file mode 100644
index 00000000000..b8eaf24bdd5
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/tls/types.go
@@ -0,0 +1,117 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tls
+
+import (
+ "crypto"
+ "crypto/dsa" //nolint:staticcheck
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "fmt"
+)
+
+// DigitallySigned gives information about a signature, including the algorithm used
+// and the signature value. Defined in RFC 5246 s4.7.
+type DigitallySigned struct {
+ Algorithm SignatureAndHashAlgorithm
+ Signature []byte `tls:"minlen:0,maxlen:65535"`
+}
+
+func (d DigitallySigned) String() string {
+ return fmt.Sprintf("Signature: HashAlgo=%v SignAlgo=%v Value=%x", d.Algorithm.Hash, d.Algorithm.Signature, d.Signature)
+}
+
+// SignatureAndHashAlgorithm gives information about the algorithms used for a
+// signature. Defined in RFC 5246 s7.4.1.4.1.
+type SignatureAndHashAlgorithm struct {
+ Hash HashAlgorithm `tls:"maxval:255"`
+ Signature SignatureAlgorithm `tls:"maxval:255"`
+}
+
+// HashAlgorithm enum from RFC 5246 s7.4.1.4.1.
+type HashAlgorithm Enum
+
+// HashAlgorithm constants from RFC 5246 s7.4.1.4.1.
+const (
+ None HashAlgorithm = 0
+ MD5 HashAlgorithm = 1
+ SHA1 HashAlgorithm = 2
+ SHA224 HashAlgorithm = 3
+ SHA256 HashAlgorithm = 4
+ SHA384 HashAlgorithm = 5
+ SHA512 HashAlgorithm = 6
+)
+
+func (h HashAlgorithm) String() string {
+ switch h {
+ case None:
+ return "None"
+ case MD5:
+ return "MD5"
+ case SHA1:
+ return "SHA1"
+ case SHA224:
+ return "SHA224"
+ case SHA256:
+ return "SHA256"
+ case SHA384:
+ return "SHA384"
+ case SHA512:
+ return "SHA512"
+ default:
+ return fmt.Sprintf("UNKNOWN(%d)", h)
+ }
+}
+
+// SignatureAlgorithm enum from RFC 5246 s7.4.1.4.1.
+type SignatureAlgorithm Enum
+
+// SignatureAlgorithm constants from RFC 5246 s7.4.1.4.1.
+const (
+ Anonymous SignatureAlgorithm = 0
+ RSA SignatureAlgorithm = 1
+ DSA SignatureAlgorithm = 2
+ ECDSA SignatureAlgorithm = 3
+)
+
+func (s SignatureAlgorithm) String() string {
+ switch s {
+ case Anonymous:
+ return "Anonymous"
+ case RSA:
+ return "RSA"
+ case DSA:
+ return "DSA"
+ case ECDSA:
+ return "ECDSA"
+ default:
+ return fmt.Sprintf("UNKNOWN(%d)", s)
+ }
+}
+
+// SignatureAlgorithmFromPubKey returns the algorithm used for this public key.
+// ECDSA, RSA, and DSA keys are supported. Other key types will return Anonymous.
+func SignatureAlgorithmFromPubKey(k crypto.PublicKey) SignatureAlgorithm {
+ switch k.(type) {
+ case *ecdsa.PublicKey:
+ return ECDSA
+ case *rsa.PublicKey:
+ return RSA
+ case *dsa.PublicKey:
+ return DSA
+ default:
+ return Anonymous
+ }
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/types.go b/vendor/github.com/google/certificate-transparency-go/types.go
new file mode 100644
index 00000000000..2a96f6a09f5
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/types.go
@@ -0,0 +1,591 @@
+// Copyright 2015 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ct holds core types and utilities for Certificate Transparency.
+package ct
+
+import (
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+
+ "github.com/google/certificate-transparency-go/tls"
+ "github.com/google/certificate-transparency-go/x509"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// The following structures represent those outlined in RFC6962; any section
+// numbers mentioned refer to that RFC.
+///////////////////////////////////////////////////////////////////////////////
+
+// LogEntryType represents the LogEntryType enum from section 3.1:
+//
+// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType;
+type LogEntryType tls.Enum // tls:"maxval:65535"
+
+// LogEntryType constants from section 3.1.
+const (
+ X509LogEntryType LogEntryType = 0
+ PrecertLogEntryType LogEntryType = 1
+)
+
+func (e LogEntryType) String() string {
+ switch e {
+ case X509LogEntryType:
+ return "X509LogEntryType"
+ case PrecertLogEntryType:
+ return "PrecertLogEntryType"
+ default:
+ return fmt.Sprintf("UnknownEntryType(%d)", e)
+ }
+}
+
+// RFC6962 section 2.1 requires a prefix byte on hash inputs for second preimage resistance.
+const (
+ TreeLeafPrefix = byte(0x00)
+ TreeNodePrefix = byte(0x01)
+)
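+
+// For example, per RFC6962 s2.1 a leaf hash is computed as
+// SHA-256(TreeLeafPrefix || leaf input), and an interior node hash as
+// SHA-256(TreeNodePrefix || left child hash || right child hash).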
+
+// MerkleLeafType represents the MerkleLeafType enum from section 3.4:
+//
+// enum { timestamped_entry(0), (255) } MerkleLeafType;
+type MerkleLeafType tls.Enum // tls:"maxval:255"
+
+// TimestampedEntryLeafType is the only defined MerkleLeafType constant from section 3.4.
+const TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT
+
+func (m MerkleLeafType) String() string {
+ switch m {
+ case TimestampedEntryLeafType:
+ return "TimestampedEntryLeafType"
+ default:
+ return fmt.Sprintf("UnknownLeafType(%d)", m)
+ }
+}
+
+// Version represents the Version enum from section 3.2:
+//
+// enum { v1(0), (255) } Version;
+type Version tls.Enum // tls:"maxval:255"
+
+// CT Version constants from section 3.2.
+const (
+ V1 Version = 0
+)
+
+func (v Version) String() string {
+ switch v {
+ case V1:
+ return "V1"
+ default:
+ return fmt.Sprintf("UnknownVersion(%d)", v)
+ }
+}
+
+// SignatureType differentiates STH signatures from SCT signatures, see section 3.2.
+//
+// enum { certificate_timestamp(0), tree_hash(1), (255) } SignatureType;
+type SignatureType tls.Enum // tls:"maxval:255"
+
+// SignatureType constants from section 3.2.
+const (
+ CertificateTimestampSignatureType SignatureType = 0
+ TreeHashSignatureType SignatureType = 1
+)
+
+func (st SignatureType) String() string {
+ switch st {
+ case CertificateTimestampSignatureType:
+ return "CertificateTimestamp"
+ case TreeHashSignatureType:
+ return "TreeHash"
+ default:
+ return fmt.Sprintf("UnknownSignatureType(%d)", st)
+ }
+}
+
+// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate
+// (section 3.1).
+type ASN1Cert struct {
+ Data []byte `tls:"minlen:1,maxlen:16777215"`
+}
+
+// LogID holds the hash of the Log's public key (section 3.2).
+// TODO(pphaneuf): Users should be migrated to the one in the logid package.
+type LogID struct {
+ KeyID [sha256.Size]byte
+}
+
+// PreCert represents a Precertificate (section 3.2).
+type PreCert struct {
+ IssuerKeyHash [sha256.Size]byte
+ TBSCertificate []byte `tls:"minlen:1,maxlen:16777215"` // DER-encoded TBSCertificate
+}
+
+// CTExtensions is a representation of the raw bytes of any CtExtension
+// structure (see section 3.2).
+// nolint: revive
+type CTExtensions []byte // tls:"minlen:0,maxlen:65535"
+
+// MerkleTreeNode represents an internal node in the CT tree.
+type MerkleTreeNode []byte
+
+// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and
+// 4.4).
+type ConsistencyProof []MerkleTreeNode
+
+// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5).
+type AuditPath []MerkleTreeNode
+
+// LeafInput represents a serialized MerkleTreeLeaf structure.
+type LeafInput []byte
+
+// DigitallySigned is a local alias for tls.DigitallySigned so that we can
+// attach a MarshalJSON method.
+type DigitallySigned tls.DigitallySigned
+
+// FromBase64String populates the DigitallySigned structure from the base64 data passed in.
+// Returns an error if the base64 data is invalid.
+func (d *DigitallySigned) FromBase64String(b64 string) error {
+ raw, err := base64.StdEncoding.DecodeString(b64)
+ if err != nil {
+ return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err)
+ }
+ var ds tls.DigitallySigned
+ if rest, err := tls.Unmarshal(raw, &ds); err != nil {
+ return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
+ } else if len(rest) > 0 {
+ return fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
+ }
+ *d = DigitallySigned(ds)
+ return nil
+}
+
+// Base64String returns the base64 representation of the DigitallySigned struct.
+func (d DigitallySigned) Base64String() (string, error) {
+ b, err := tls.Marshal(d)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (d DigitallySigned) MarshalJSON() ([]byte, error) {
+ b64, err := d.Base64String()
+ if err != nil {
+ return []byte{}, err
+ }
+ return []byte(`"` + b64 + `"`), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
+ var content string
+ if err := json.Unmarshal(b, &content); err != nil {
+ return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
+ }
+ return d.FromBase64String(content)
+}
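+
+// As an illustrative sketch (b64sig here is a hypothetical base64 signature
+// taken from a log response), these methods round-trip the base64 JSON form
+// used by the CT API:
+//
+//	var ds DigitallySigned
+//	if err := ds.FromBase64String(b64sig); err != nil {
+//		// handle malformed signature
+//	}
+//	out, _ := json.Marshal(ds) // re-encodes as a quoted base64 string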
+
+// RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log.
+type RawLogEntry struct {
+ // Index is a position of the entry in the log.
+ Index int64
+ // Leaf is a parsed Merkle leaf hash input.
+ Leaf MerkleTreeLeaf
+ // Cert is:
+ // - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType.
+ // - A precertificate if Leaf.TimestampedEntry.EntryType is
+ // PrecertLogEntryType, in the form of a DER-encoded Certificate as
+ // originally added (which includes the poison extension and a signature
+ // generated over the pre-cert by the pre-cert issuer).
+ // - Empty otherwise.
+ Cert ASN1Cert
+ // Chain is the issuing certificate chain starting with the issuer of Cert,
+ // or an empty slice if Cert is empty.
+ Chain []ASN1Cert
+}
+
+// LogEntry represents the (parsed) contents of an entry in a CT log. This is described
+// in section 3.1, but note that this structure does *not* match the TLS structure
+// defined there (the TLS structure is never used directly in RFC6962).
+type LogEntry struct {
+ Index int64
+ Leaf MerkleTreeLeaf
+ // Exactly one of the following three fields should be non-empty.
+ X509Cert *x509.Certificate // Parsed X.509 certificate
+ Precert *Precertificate // Extracted precertificate
+ JSONData []byte
+
+ // Chain holds the issuing certificate chain, starting with the
+ // issuer of the leaf certificate / pre-certificate.
+ Chain []ASN1Cert
+}
+
+// PrecertChainEntry holds a precertificate together with a validation chain
+// for it; see section 3.1.
+type PrecertChainEntry struct {
+ PreCertificate ASN1Cert `tls:"minlen:1,maxlen:16777215"`
+ CertificateChain []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
+}
+
+// CertificateChain holds a chain of certificates, as returned as extra data
+// for get-entries (section 4.6).
+type CertificateChain struct {
+ Entries []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
+}
+
+// PrecertChainEntryHash is an extended PrecertChainEntry type with the
+// IssuanceChainHash field added to store the hash of the
+// CertificateChain field of PrecertChainEntry.
+type PrecertChainEntryHash struct {
+ PreCertificate ASN1Cert `tls:"minlen:1,maxlen:16777215"`
+ IssuanceChainHash []byte `tls:"minlen:0,maxlen:256"`
+}
+
+// CertificateChainHash is an extended CertificateChain type with the
+// IssuanceChainHash field added to store the hash of the
+// Entries field of CertificateChain.
+type CertificateChainHash struct {
+ IssuanceChainHash []byte `tls:"minlen:0,maxlen:256"`
+}
+
+// JSONDataEntry holds arbitrary data.
+type JSONDataEntry struct {
+ Data []byte `tls:"minlen:0,maxlen:1677215"`
+}
+
+// SHA256Hash represents the output from the SHA256 hash function.
+type SHA256Hash [sha256.Size]byte
+
+// FromBase64String populates the SHA256 struct with the contents of the base64 data passed in.
+func (s *SHA256Hash) FromBase64String(b64 string) error {
+ bs, err := base64.StdEncoding.DecodeString(b64)
+ if err != nil {
+ return fmt.Errorf("failed to unbase64 LogID: %v", err)
+ }
+ if len(bs) != sha256.Size {
+ return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs))
+ }
+ copy(s[:], bs)
+ return nil
+}
+
+// Base64String returns the base64 representation of this SHA256Hash.
+func (s SHA256Hash) Base64String() string {
+ return base64.StdEncoding.EncodeToString(s[:])
+}
+
+// MarshalJSON implements the json.Marshaller interface for SHA256Hash.
+func (s SHA256Hash) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + s.Base64String() + `"`), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (s *SHA256Hash) UnmarshalJSON(b []byte) error {
+ var content string
+ if err := json.Unmarshal(b, &content); err != nil {
+ return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err)
+ }
+ return s.FromBase64String(content)
+}
+
+// SignedTreeHead represents the structure returned by the get-sth CT method
+// after base64 decoding; see sections 3.5 and 4.3.
+type SignedTreeHead struct {
+ Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms
+ TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree
+ Timestamp uint64 `json:"timestamp"` // The time at which the STH was created
+ SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree
+ TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // Log's signature over a TLS-encoded TreeHeadSignature
+ LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key
+}
+
+func (s SignedTreeHead) String() string {
+ sigStr, err := s.TreeHeadSignature.Base64String()
+ if err != nil {
+ sigStr = tls.DigitallySigned(s.TreeHeadSignature).String()
+ }
+
+ // If the LogID field in the SignedTreeHead is empty, don't include it in
+ // the string.
+ var logIDStr string
+ if id, empty := s.LogID, (SHA256Hash{}); id != empty {
+ logIDStr = fmt.Sprintf("LogID:%s, ", id.Base64String())
+ }
+
+ return fmt.Sprintf("{%sTreeSize:%d, Timestamp:%d, SHA256RootHash:%q, TreeHeadSignature:%q}",
+ logIDStr, s.TreeSize, s.Timestamp, s.SHA256RootHash.Base64String(), sigStr)
+}
+
+// TreeHeadSignature holds the data over which the signature in an STH is
+// generated; see section 3.5.
+type TreeHeadSignature struct {
+ Version Version `tls:"maxval:255"`
+ SignatureType SignatureType `tls:"maxval:255"` // == TreeHashSignatureType
+ Timestamp uint64
+ TreeSize uint64
+ SHA256RootHash SHA256Hash
+}
+
+// SignedCertificateTimestamp represents the structure returned by the
+// add-chain and add-pre-chain methods after base64 decoding; see sections
+// 3.2, 4.1 and 4.2.
+type SignedCertificateTimestamp struct {
+ SCTVersion Version `tls:"maxval:255"`
+ LogID LogID
+ Timestamp uint64
+ Extensions CTExtensions `tls:"minlen:0,maxlen:65535"`
+ Signature DigitallySigned // Signature over TLS-encoded CertificateTimestamp
+}
+
+// CertificateTimestamp is the collection of data that the signature in an
+// SCT is over; see section 3.2.
+type CertificateTimestamp struct {
+ SCTVersion Version `tls:"maxval:255"`
+ SignatureType SignatureType `tls:"maxval:255"`
+ Timestamp uint64
+ EntryType LogEntryType `tls:"maxval:65535"`
+ X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"`
+ PrecertEntry *PreCert `tls:"selector:EntryType,val:1"`
+ JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"`
+ Extensions CTExtensions `tls:"minlen:0,maxlen:65535"`
+}
+
+func (s SignedCertificateTimestamp) String() string {
+ return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion,
+ base64.StdEncoding.EncodeToString(s.LogID.KeyID[:]),
+ s.Timestamp,
+ s.Extensions,
+ s.Signature)
+}
+
+// TimestampedEntry is part of the MerkleTreeLeaf structure; see section 3.4.
+type TimestampedEntry struct {
+ Timestamp uint64
+ EntryType LogEntryType `tls:"maxval:65535"`
+ X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"`
+ PrecertEntry *PreCert `tls:"selector:EntryType,val:1"`
+ JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"`
+ Extensions CTExtensions `tls:"minlen:0,maxlen:65535"`
+}
+
+// MerkleTreeLeaf represents the deserialized structure of the hash input for the
+// leaves of a log's Merkle tree; see section 3.4.
+type MerkleTreeLeaf struct {
+ Version Version `tls:"maxval:255"`
+ LeafType MerkleLeafType `tls:"maxval:255"`
+ TimestampedEntry *TimestampedEntry `tls:"selector:LeafType,val:0"`
+}
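+
+// Note that the selector tags above mirror the TLS select(...) construct:
+// exactly one of the tagged pointer fields is non-nil, chosen by the value of
+// the named enum field. For a MerkleTreeLeaf, LeafType == 0 means
+// TimestampedEntry is populated; within a TimestampedEntry, EntryType picks
+// exactly one of X509Entry, PrecertEntry or JSONEntry.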
+
+// Precertificate represents the parsed CT Precertificate structure.
+type Precertificate struct {
+ // DER-encoded pre-certificate as originally added, which includes a
+ // poison extension and a signature generated over the pre-cert by
+ // the pre-cert issuer (which might differ from the issuer of the final
+ // cert, see RFC6962 s3.1).
+ Submitted ASN1Cert
+ // SHA256 hash of the issuing key
+ IssuerKeyHash [sha256.Size]byte
+ // Parsed TBSCertificate structure, held in an x509.Certificate for convenience.
+ TBSCertificate *x509.Certificate
+}
+
+// X509Certificate returns the X.509 Certificate contained within the
+// MerkleTreeLeaf.
+func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) {
+ if m.TimestampedEntry.EntryType != X509LogEntryType {
+ return nil, fmt.Errorf("cannot call X509Certificate on a MerkleTreeLeaf that is not an X509 entry")
+ }
+ return x509.ParseCertificate(m.TimestampedEntry.X509Entry.Data)
+}
+
+// Precertificate returns the X.509 Precertificate contained within the MerkleTreeLeaf.
+//
+// The returned precertificate is embedded in an x509.Certificate, but is in the
+// form stored internally in the log rather than the original submitted form
+// (i.e. it does not include the poison extension and any changes to reflect the
+// final certificate's issuer have been made; see x509.BuildPrecertTBS).
+func (m *MerkleTreeLeaf) Precertificate() (*x509.Certificate, error) {
+ if m.TimestampedEntry.EntryType != PrecertLogEntryType {
+ return nil, fmt.Errorf("cannot call Precertificate on a MerkleTreeLeaf that is not a precert entry")
+ }
+ return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate)
+}
+
+// APIEndpoint is a string that represents one of the Certificate Transparency
+// Log API endpoints.
+type APIEndpoint string
+
+// Certificate Transparency Log API endpoints; see section 4.
+// WARNING: These should match the URI paths without the "/ct/v1/" prefix. If
+// you change these constants, you may need to change those too.
+const (
+ AddChainStr APIEndpoint = "add-chain"
+ AddPreChainStr APIEndpoint = "add-pre-chain"
+ GetSTHStr APIEndpoint = "get-sth"
+ GetEntriesStr APIEndpoint = "get-entries"
+ GetProofByHashStr APIEndpoint = "get-proof-by-hash"
+ GetSTHConsistencyStr APIEndpoint = "get-sth-consistency"
+ GetRootsStr APIEndpoint = "get-roots"
+ GetEntryAndProofStr APIEndpoint = "get-entry-and-proof"
+)
+
+// URI paths for Log requests; see section 4.
+// WARNING: These should match the API endpoints, with the "/ct/v1/" prefix.
+// If you change these constants, you may need to change those too.
+const (
+ AddChainPath = "/ct/v1/add-chain"
+ AddPreChainPath = "/ct/v1/add-pre-chain"
+ GetSTHPath = "/ct/v1/get-sth"
+ GetEntriesPath = "/ct/v1/get-entries"
+ GetProofByHashPath = "/ct/v1/get-proof-by-hash"
+ GetSTHConsistencyPath = "/ct/v1/get-sth-consistency"
+ GetRootsPath = "/ct/v1/get-roots"
+ GetEntryAndProofPath = "/ct/v1/get-entry-and-proof"
+
+ AddJSONPath = "/ct/v1/add-json" // Experimental addition
+)
+
+// AddChainRequest represents the JSON request body sent to the add-chain and
+// add-pre-chain POST methods from sections 4.1 and 4.2.
+type AddChainRequest struct {
+ Chain [][]byte `json:"chain"`
+}
+
+// AddChainResponse represents the JSON response to the add-chain and
+// add-pre-chain POST methods.
+// An SCT represents a Log's promise to integrate a [pre-]certificate into the
+// log within a defined period of time.
+type AddChainResponse struct {
+ SCTVersion Version `json:"sct_version"` // SCT structure version
+ ID []byte `json:"id"` // Log ID
+ Timestamp uint64 `json:"timestamp"` // Timestamp of issuance
+ Extensions string `json:"extensions"` // Holder for any CT extensions
+ Signature []byte `json:"signature"` // Log signature for this SCT
+}
+
+// ToSignedCertificateTimestamp creates a SignedCertificateTimestamp from the
+// AddChainResponse.
+func (r *AddChainResponse) ToSignedCertificateTimestamp() (*SignedCertificateTimestamp, error) {
+ sct := SignedCertificateTimestamp{
+ SCTVersion: r.SCTVersion,
+ Timestamp: r.Timestamp,
+ }
+
+ if len(r.ID) != sha256.Size {
+ return nil, fmt.Errorf("id is invalid length, expected %d got %d", sha256.Size, len(r.ID))
+ }
+ copy(sct.LogID.KeyID[:], r.ID)
+
+ exts, err := base64.StdEncoding.DecodeString(r.Extensions)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 data in Extensions (%q): %v", r.Extensions, err)
+ }
+ sct.Extensions = CTExtensions(exts)
+
+ var ds DigitallySigned
+ if rest, err := tls.Unmarshal(r.Signature, &ds); err != nil {
+ return nil, fmt.Errorf("tls.Unmarshal(): %s", err)
+ } else if len(rest) > 0 {
+ return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
+ }
+ sct.Signature = ds
+
+ return &sct, nil
+}
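+
+// As an illustrative sketch (body here is a hypothetical add-chain response
+// body):
+//
+//	var resp AddChainResponse
+//	if err := json.Unmarshal(body, &resp); err != nil {
+//		// handle bad JSON
+//	}
+//	sct, err := resp.ToSignedCertificateTimestamp()
+//	// sct.Signature now holds the TLS-decoded DigitallySigned structure.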
+
+// AddJSONRequest represents the JSON request body sent to the add-json POST method.
+// The corresponding response re-uses AddChainResponse.
+// This is an experimental addition not covered by RFC6962.
+type AddJSONRequest struct {
+ Data interface{} `json:"data"`
+}
+
+// GetSTHResponse represents the JSON response to the get-sth GET method from section 4.3.
+type GetSTHResponse struct {
+ TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree
+ Timestamp uint64 `json:"timestamp"` // Time that the tree was created
+ SHA256RootHash []byte `json:"sha256_root_hash"` // Root hash of the tree
+ TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH
+}
+
+// ToSignedTreeHead creates a SignedTreeHead from the GetSTHResponse.
+func (r *GetSTHResponse) ToSignedTreeHead() (*SignedTreeHead, error) {
+ sth := SignedTreeHead{
+ TreeSize: r.TreeSize,
+ Timestamp: r.Timestamp,
+ }
+
+ if len(r.SHA256RootHash) != sha256.Size {
+ return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(r.SHA256RootHash))
+ }
+ copy(sth.SHA256RootHash[:], r.SHA256RootHash)
+
+ var ds DigitallySigned
+ if rest, err := tls.Unmarshal(r.TreeHeadSignature, &ds); err != nil {
+ return nil, fmt.Errorf("tls.Unmarshal(): %s", err)
+ } else if len(rest) > 0 {
+ return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
+ }
+ sth.TreeHeadSignature = ds
+
+ return &sth, nil
+}
+
+// GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency
+// GET method from section 4.4. (The corresponding GET request has parameters 'first' and
+// 'second'.)
+type GetSTHConsistencyResponse struct {
+ Consistency [][]byte `json:"consistency"`
+}
+
+// GetProofByHashResponse represents the JSON response to the get-proof-by-hash GET
+// method from section 4.5. (The corresponding GET request has parameters 'hash'
+// and 'tree_size'.)
+type GetProofByHashResponse struct {
+ LeafIndex int64 `json:"leaf_index"` // The 0-based index of the end entity corresponding to the "hash" parameter.
+ AuditPath [][]byte `json:"audit_path"` // An array of base64-encoded Merkle Tree nodes proving the inclusion of the chosen certificate.
+}
+
+// LeafEntry represents a leaf in the Log's Merkle tree, as returned by the get-entries
+// GET method from section 4.6.
+type LeafEntry struct {
+ // LeafInput is a TLS-encoded MerkleTreeLeaf
+ LeafInput []byte `json:"leaf_input"`
+ // ExtraData holds (unsigned) extra data, normally the cert validation chain.
+ ExtraData []byte `json:"extra_data"`
+}
+
+// GetEntriesResponse represents the JSON response to the get-entries GET method
+// from section 4.6.
+type GetEntriesResponse struct {
+ Entries []LeafEntry `json:"entries"` // the list of returned entries
+}
+
+// GetRootsResponse represents the JSON response to the get-roots GET method from section 4.7.
+type GetRootsResponse struct {
+ Certificates []string `json:"certificates"`
+}
+
+// GetEntryAndProofResponse represents the JSON response to the get-entry-and-proof
+// GET method from section 4.8. (The corresponding GET request has parameters 'leaf_index'
+// and 'tree_size'.)
+type GetEntryAndProofResponse struct {
+ LeafInput []byte `json:"leaf_input"` // the entry itself
+ ExtraData []byte `json:"extra_data"` // any chain provided when the entry was added to the log
+ AuditPath [][]byte `json:"audit_path"` // the corresponding proof
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/README.md b/vendor/github.com/google/certificate-transparency-go/x509/README.md
new file mode 100644
index 00000000000..6f22f5f8344
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/README.md
@@ -0,0 +1,7 @@
+# Important Notice
+
+This is a fork of the `crypto/x509` Go package. The original source can be found on
+[GitHub](https://github.com/golang/go).
+
+Be careful about making local modifications to this code, as they will
+make future maintenance harder.
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go b/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go
new file mode 100644
index 00000000000..4823d594633
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go
@@ -0,0 +1,159 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "encoding/pem"
+ "errors"
+ "runtime"
+)
+
+// CertPool is a set of certificates.
+type CertPool struct {
+ bySubjectKeyId map[string][]int
+ byName map[string][]int
+ certs []*Certificate
+}
+
+// NewCertPool returns a new, empty CertPool.
+func NewCertPool() *CertPool {
+ return &CertPool{
+ bySubjectKeyId: make(map[string][]int),
+ byName: make(map[string][]int),
+ }
+}
+
+func (s *CertPool) copy() *CertPool {
+ p := &CertPool{
+ bySubjectKeyId: make(map[string][]int, len(s.bySubjectKeyId)),
+ byName: make(map[string][]int, len(s.byName)),
+ certs: make([]*Certificate, len(s.certs)),
+ }
+ for k, v := range s.bySubjectKeyId {
+ indexes := make([]int, len(v))
+ copy(indexes, v)
+ p.bySubjectKeyId[k] = indexes
+ }
+ for k, v := range s.byName {
+ indexes := make([]int, len(v))
+ copy(indexes, v)
+ p.byName[k] = indexes
+ }
+ copy(p.certs, s.certs)
+ return p
+}
+
+// SystemCertPool returns a copy of the system cert pool.
+//
+// Any mutations to the returned pool are not written to disk and do
+// not affect any other pool returned by SystemCertPool.
+//
+// New changes in the system cert pool might not be reflected
+// in subsequent calls.
+func SystemCertPool() (*CertPool, error) {
+ if runtime.GOOS == "windows" {
+ // Issue 16736, 18609:
+ return nil, errors.New("crypto/x509: system root pool is not available on Windows")
+ }
+
+ if sysRoots := systemRootsPool(); sysRoots != nil {
+ return sysRoots.copy(), nil
+ }
+
+ return loadSystemRoots()
+}
+
+// findPotentialParents returns the indexes of certificates in s which might
+// have signed cert. The caller must not modify the returned slice.
+func (s *CertPool) findPotentialParents(cert *Certificate) []int {
+ if s == nil {
+ return nil
+ }
+
+ var candidates []int
+ if len(cert.AuthorityKeyId) > 0 {
+ candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)]
+ }
+ if len(candidates) == 0 {
+ candidates = s.byName[string(cert.RawIssuer)]
+ }
+ return candidates
+}
+
+func (s *CertPool) contains(cert *Certificate) bool {
+ if s == nil {
+ return false
+ }
+
+ candidates := s.byName[string(cert.RawSubject)]
+ for _, c := range candidates {
+ if s.certs[c].Equal(cert) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// AddCert adds a certificate to a pool.
+func (s *CertPool) AddCert(cert *Certificate) {
+ if cert == nil {
+ panic("adding nil Certificate to CertPool")
+ }
+
+ // Check that the certificate isn't being added twice.
+ if s.contains(cert) {
+ return
+ }
+
+ n := len(s.certs)
+ s.certs = append(s.certs, cert)
+
+ if len(cert.SubjectKeyId) > 0 {
+ keyId := string(cert.SubjectKeyId)
+ s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n)
+ }
+ name := string(cert.RawSubject)
+ s.byName[name] = append(s.byName[name], n)
+}
+
+// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates.
+// It appends any certificates found to s and reports whether any certificates
+// were successfully parsed.
+//
+// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set
+// of root CAs in a format suitable for this function.
+func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
+ for len(pemCerts) > 0 {
+ var block *pem.Block
+ block, pemCerts = pem.Decode(pemCerts)
+ if block == nil {
+ break
+ }
+ if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+ continue
+ }
+
+ cert, err := ParseCertificate(block.Bytes)
+ if IsFatal(err) {
+ continue
+ }
+
+ s.AddCert(cert)
+ ok = true
+ }
+
+ return
+}
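+
+// As an illustrative sketch (the file path is hypothetical), a root pool can
+// be built from a PEM bundle like so:
+//
+//	pemData, _ := os.ReadFile("/etc/ssl/cert.pem")
+//	roots := NewCertPool()
+//	if !roots.AppendCertsFromPEM(pemData) {
+//		// no certificate could be parsed from the bundle
+//	}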
+
+// Subjects returns a list of the DER-encoded subjects of
+// all of the certificates in the pool.
+func (s *CertPool) Subjects() [][]byte {
+ res := make([][]byte, len(s.certs))
+ for i, c := range s.certs {
+ res[i] = c.RawSubject
+ }
+ return res
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/curves.go b/vendor/github.com/google/certificate-transparency-go/x509/curves.go
new file mode 100644
index 00000000000..0e2778cb353
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/curves.go
@@ -0,0 +1,37 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "crypto/elliptic"
+ "math/big"
+ "sync"
+)
+
+// This file holds ECC curves that are not supported by the main Go crypto/elliptic
+// library, but which have been observed in certificates in the wild.
+
+var initonce sync.Once
+var p192r1 *elliptic.CurveParams
+
+func initAllCurves() {
+ initSECP192R1()
+}
+
+func initSECP192R1() {
+ // See SEC-2, section 2.2.2
+ p192r1 = &elliptic.CurveParams{Name: "P-192"}
+ p192r1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", 16)
+ p192r1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 16)
+ p192r1.B, _ = new(big.Int).SetString("64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", 16)
+ p192r1.Gx, _ = new(big.Int).SetString("188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", 16)
+ p192r1.Gy, _ = new(big.Int).SetString("07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", 16)
+ p192r1.BitSize = 192
+}
+
+func secp192r1() elliptic.Curve {
+ initonce.Do(initAllCurves)
+ return p192r1
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/error.go b/vendor/github.com/google/certificate-transparency-go/x509/error.go
new file mode 100644
index 00000000000..40b7ef7d9fe
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/error.go
@@ -0,0 +1,236 @@
+package x509
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Error implements the error interface and describes a single error in an X.509 certificate or CRL.
+type Error struct {
+ ID ErrorID
+ Category ErrCategory
+ Summary string
+ Field string
+ SpecRef string
+ SpecText string
+ // Fatal indicates that parsing has been aborted.
+ Fatal bool
+}
+
+func (err Error) Error() string {
+ var msg bytes.Buffer
+ if err.ID != ErrInvalidID {
+ if err.Fatal {
+ msg.WriteRune('E')
+ } else {
+ msg.WriteRune('W')
+ }
+ msg.WriteString(fmt.Sprintf("%03d: ", err.ID))
+ }
+ msg.WriteString(err.Summary)
+ return msg.String()
+}
+
+// VerboseError creates a more verbose error string, including spec details.
+func (err Error) VerboseError() string {
+ var msg bytes.Buffer
+ msg.WriteString(err.Error())
+ if len(err.Field) > 0 || err.Category != UnknownCategory || len(err.SpecRef) > 0 || len(err.SpecText) > 0 {
+ msg.WriteString(" (")
+ needSep := false
+ if len(err.Field) > 0 {
+ msg.WriteString(err.Field)
+ needSep = true
+ }
+ if err.Category != UnknownCategory {
+ if needSep {
+ msg.WriteString(": ")
+ }
+ msg.WriteString(err.Category.String())
+ needSep = true
+ }
+ if len(err.SpecRef) > 0 {
+ if needSep {
+ msg.WriteString(": ")
+ }
+ msg.WriteString(err.SpecRef)
+ needSep = true
+ }
+ if len(err.SpecText) > 0 {
+ if needSep {
+ if len(err.SpecRef) > 0 {
+ msg.WriteString(", ")
+ } else {
+ msg.WriteString(": ")
+ }
+ }
+ msg.WriteString("'")
+ msg.WriteString(err.SpecText)
+ msg.WriteString("'")
+ }
+ msg.WriteString(")")
+ }
+
+ return msg.String()
+}
+
+// ErrCategory indicates the category of an x509.Error.
+type ErrCategory int
+
+// ErrCategory values.
+const (
+ UnknownCategory ErrCategory = iota
+ // Errors in ASN.1 encoding
+ InvalidASN1Encoding
+ InvalidASN1Content
+ InvalidASN1DER
+ // Errors in ASN.1 relative to schema
+ InvalidValueRange
+ InvalidASN1Type
+ UnexpectedAdditionalData
+ // Errors in X.509
+ PoorlyFormedCertificate // Fails a SHOULD clause
+ MalformedCertificate // Fails a MUST clause
+ PoorlyFormedCRL // Fails a SHOULD clause
+ MalformedCRL // Fails a MUST clause
+ // Errors relative to CA/Browser Forum guidelines
+ BaselineRequirementsFailure
+ EVRequirementsFailure
+ // Other errors
+ InsecureAlgorithm
+ UnrecognizedValue
+)
+
+func (category ErrCategory) String() string {
+ switch category {
+ case InvalidASN1Encoding:
+ return "Invalid ASN.1 encoding"
+ case InvalidASN1Content:
+ return "Invalid ASN.1 content"
+ case InvalidASN1DER:
+ return "Invalid ASN.1 distinguished encoding"
+ case InvalidValueRange:
+ return "Invalid value for range given in schema"
+ case InvalidASN1Type:
+ return "Invalid ASN.1 type for schema"
+ case UnexpectedAdditionalData:
+ return "Unexpected additional data present"
+ case PoorlyFormedCertificate:
+ return "Certificate does not comply with SHOULD clause in spec"
+ case MalformedCertificate:
+ return "Certificate does not comply with MUST clause in spec"
+ case PoorlyFormedCRL:
+ return "Certificate Revocation List does not comply with SHOULD clause in spec"
+ case MalformedCRL:
+ return "Certificate Revocation List does not comply with MUST clause in spec"
+ case BaselineRequirementsFailure:
+ return "Certificate does not comply with CA/BF baseline requirements"
+ case EVRequirementsFailure:
+ return "Certificate does not comply with CA/BF EV requirements"
+ case InsecureAlgorithm:
+ return "Certificate uses an insecure algorithm"
+ case UnrecognizedValue:
+ return "Certificate uses an unrecognized value"
+ default:
+ return fmt.Sprintf("Unknown (%d)", category)
+ }
+}
+
+// ErrorID is an identifier for an x509.Error, to allow filtering.
+type ErrorID int
+
+// Errors implements the error interface and holds a collection of errors found in a certificate or CRL.
+type Errors struct {
+ Errs []Error
+}
+
+// Error converts to a string.
+func (e *Errors) Error() string {
+ return e.combineErrors(Error.Error)
+}
+
+// VerboseError creates a more verbose error string, including spec details.
+func (e *Errors) VerboseError() string {
+ return e.combineErrors(Error.VerboseError)
+}
+
+// Fatal indicates whether e includes a fatal error.
+func (e *Errors) Fatal() bool {
+ return e.FirstFatal() != nil
+}
+
+// Empty indicates whether e has no errors.
+func (e *Errors) Empty() bool {
+ if e == nil {
+ return true
+ }
+ return len(e.Errs) == 0
+}
+
+// FirstFatal returns the first fatal error in e, or nil
+// if there is no fatal error.
+func (e *Errors) FirstFatal() error {
+ if e == nil {
+ return nil
+ }
+ for _, err := range e.Errs {
+ if err.Fatal {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddID adds the Error identified by the given id to an x509.Errors.
+func (e *Errors) AddID(id ErrorID, args ...interface{}) {
+ e.Errs = append(e.Errs, NewError(id, args...))
+}
+
+func (e Errors) combineErrors(errfn func(Error) string) string {
+ if len(e.Errs) == 0 {
+ return ""
+ }
+ if len(e.Errs) == 1 {
+ return errfn(e.Errs[0])
+ }
+ var msg bytes.Buffer
+ msg.WriteString("Errors:")
+ for _, err := range e.Errs {
+ msg.WriteString("\n ")
+ msg.WriteString(errfn(err))
+ }
+ return msg.String()
+}
+
+// Filter creates a new Errors object with any entries from the filtered
+// list of IDs removed.
+func (e Errors) Filter(filtered []ErrorID) Errors {
+ var results Errors
+eloop:
+ for _, v := range e.Errs {
+ for _, f := range filtered {
+ if v.ID == f {
+ continue eloop
+ }
+ }
+ results.Errs = append(results.Errs, v)
+ }
+ return results
+}
+
+// ErrorFilter builds a list of error IDs (suitable for use with Errors.Filter) from a comma-separated string.
+func ErrorFilter(ignore string) []ErrorID {
+ var ids []ErrorID
+ filters := strings.Split(ignore, ",")
+ for _, f := range filters {
+ v, err := strconv.Atoi(f)
+ if err != nil {
+ continue
+ }
+ ids = append(ids, ErrorID(v))
+ }
+ return ids
+}
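+
+// As an illustrative example, ErrorFilter("3,5") yields
+// []ErrorID{ErrorID(3), ErrorID(5)}, which can then be passed to
+// Errors.Filter to drop those error IDs from a parse result.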
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/errors.go b/vendor/github.com/google/certificate-transparency-go/x509/errors.go
new file mode 100644
index 00000000000..ec2fe06a99f
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/errors.go
@@ -0,0 +1,302 @@
+package x509
+
+import "fmt"
+
+// To preserve error IDs, only append to this list, never insert.
+const (
+ ErrInvalidID ErrorID = iota
+ ErrInvalidCertList
+ ErrTrailingCertList
+ ErrUnexpectedlyCriticalCertListExtension
+ ErrUnexpectedlyNonCriticalCertListExtension
+ ErrInvalidCertListAuthKeyID
+ ErrTrailingCertListAuthKeyID
+ ErrInvalidCertListIssuerAltName
+ ErrInvalidCertListCRLNumber
+ ErrTrailingCertListCRLNumber
+ ErrNegativeCertListCRLNumber
+ ErrInvalidCertListDeltaCRL
+ ErrTrailingCertListDeltaCRL
+ ErrNegativeCertListDeltaCRL
+ ErrInvalidCertListIssuingDP
+ ErrTrailingCertListIssuingDP
+ ErrCertListIssuingDPMultipleTypes
+ ErrCertListIssuingDPInvalidFullName
+ ErrInvalidCertListFreshestCRL
+ ErrInvalidCertListAuthInfoAccess
+ ErrTrailingCertListAuthInfoAccess
+ ErrUnhandledCriticalCertListExtension
+ ErrUnexpectedlyCriticalRevokedCertExtension
+ ErrUnexpectedlyNonCriticalRevokedCertExtension
+ ErrInvalidRevocationReason
+ ErrTrailingRevocationReason
+ ErrInvalidRevocationInvalidityDate
+ ErrTrailingRevocationInvalidityDate
+ ErrInvalidRevocationIssuer
+ ErrUnhandledCriticalRevokedCertExtension
+
+ ErrMaxID
+)
+
+// idToError gives a template x509.Error for each defined ErrorID; where the Summary
+// field may hold format specifiers that take field parameters.
+var idToError map[ErrorID]Error
+
+var errorInfo = []Error{
+ {
+ ID: ErrInvalidCertList,
+ Summary: "x509: failed to parse CertificateList: %v",
+ Field: "CertificateList",
+ SpecRef: "RFC 5280 s5.1",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrTrailingCertList,
+ Summary: "x509: trailing data after CertificateList",
+ Field: "CertificateList",
+ SpecRef: "RFC 5280 s5.1",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+
+ {
+ ID: ErrUnexpectedlyCriticalCertListExtension,
+ Summary: "x509: certificate list extension %v marked critical but expected to be non-critical",
+ Field: "tbsCertList.crlExtensions.*.critical",
+ SpecRef: "RFC 5280 s5.2",
+ Category: MalformedCRL,
+ },
+ {
+ ID: ErrUnexpectedlyNonCriticalCertListExtension,
+ Summary: "x509: certificate list extension %v marked non-critical but expected to be critical",
+ Field: "tbsCertList.crlExtensions.*.critical",
+ SpecRef: "RFC 5280 s5.2",
+ Category: MalformedCRL,
+ },
+
+ {
+ ID: ErrInvalidCertListAuthKeyID,
+ Summary: "x509: failed to unmarshal certificate-list authority key-id: %v",
+ Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier",
+ SpecRef: "RFC 5280 s5.2.1",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrTrailingCertListAuthKeyID,
+ Summary: "x509: trailing data after certificate list auth key ID",
+ Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier",
+ SpecRef: "RFC 5280 s5.2.1",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrInvalidCertListIssuerAltName,
+ Summary: "x509: failed to parse CRL issuer alt name: %v",
+ Field: "tbsCertList.crlExtensions.*.IssuerAltName",
+ SpecRef: "RFC 5280 s5.2.2",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrInvalidCertListCRLNumber,
+ Summary: "x509: failed to unmarshal certificate-list crl-number: %v",
+ Field: "tbsCertList.crlExtensions.*.CRLNumber",
+ SpecRef: "RFC 5280 s5.2.3",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrTrailingCertListCRLNumber,
+ Summary: "x509: trailing data after certificate list crl-number",
+ Field: "tbsCertList.crlExtensions.*.CRLNumber",
+ SpecRef: "RFC 5280 s5.2.3",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrNegativeCertListCRLNumber,
+ Summary: "x509: negative certificate list crl-number: %d",
+ Field: "tbsCertList.crlExtensions.*.CRLNumber",
+ SpecRef: "RFC 5280 s5.2.3",
+ Category: MalformedCRL,
+ Fatal: true,
+ },
+ {
+ ID: ErrInvalidCertListDeltaCRL,
+ Summary: "x509: failed to unmarshal certificate-list delta-crl: %v",
+ Field: "tbsCertList.crlExtensions.*.BaseCRLNumber",
+ SpecRef: "RFC 5280 s5.2.4",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrTrailingCertListDeltaCRL,
+ Summary: "x509: trailing data after certificate list delta-crl",
+ Field: "tbsCertList.crlExtensions.*.BaseCRLNumber",
+ SpecRef: "RFC 5280 s5.2.4",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrNegativeCertListDeltaCRL,
+ Summary: "x509: negative certificate list base-crl-number: %d",
+ Field: "tbsCertList.crlExtensions.*.BaseCRLNumber",
+ SpecRef: "RFC 5280 s5.2.4",
+ Category: MalformedCRL,
+ Fatal: true,
+ },
+ {
+ ID: ErrInvalidCertListIssuingDP,
+ Summary: "x509: failed to unmarshal certificate list issuing distribution point: %v",
+ Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
+ SpecRef: "RFC 5280 s5.2.5",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrTrailingCertListIssuingDP,
+ Summary: "x509: trailing data after certificate list issuing distribution point",
+ Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
+ SpecRef: "RFC 5280 s5.2.5",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrCertListIssuingDPMultipleTypes,
+ Summary: "x509: multiple cert types set in issuing-distribution-point: user:%v CA:%v attr:%v",
+ Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
+ SpecRef: "RFC 5280 s5.2.5",
+ SpecText: "at most one of onlyContainsUserCerts, onlyContainsCACerts, and onlyContainsAttributeCerts may be set to TRUE.",
+ Category: MalformedCRL,
+ Fatal: true,
+ },
+ {
+ ID: ErrCertListIssuingDPInvalidFullName,
+ Summary: "x509: failed to parse CRL issuing-distribution-point fullName: %v",
+ Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint.distributionPoint",
+ SpecRef: "RFC 5280 s5.2.5",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrInvalidCertListFreshestCRL,
+ Summary: "x509: failed to unmarshal certificate list freshestCRL: %v",
+ Field: "tbsCertList.crlExtensions.*.FreshestCRL",
+ SpecRef: "RFC 5280 s5.2.6",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrInvalidCertListAuthInfoAccess,
+ Summary: "x509: failed to unmarshal certificate list authority info access: %v",
+ Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess",
+ SpecRef: "RFC 5280 s5.2.7",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrTrailingCertListAuthInfoAccess,
+ Summary: "x509: trailing data after certificate list authority info access",
+ Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess",
+ SpecRef: "RFC 5280 s5.2.7",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrUnhandledCriticalCertListExtension,
+ Summary: "x509: unhandled critical extension in certificate list: %v",
+ Field: "tbsCertList.revokedCertificates.crlExtensions.*",
+ SpecRef: "RFC 5280 s5.2",
+ SpecText: "If a CRL contains a critical extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of certificates.",
+ Category: MalformedCRL,
+ Fatal: true,
+ },
+
+ {
+ ID: ErrUnexpectedlyCriticalRevokedCertExtension,
+ Summary: "x509: revoked certificate extension %v marked critical but expected to be non-critical",
+ Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical",
+ SpecRef: "RFC 5280 s5.3",
+ Category: MalformedCRL,
+ },
+ {
+ ID: ErrUnexpectedlyNonCriticalRevokedCertExtension,
+ Summary: "x509: revoked certificate extension %v marked non-critical but expected to be critical",
+ Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical",
+ SpecRef: "RFC 5280 s5.3",
+ Category: MalformedCRL,
+ },
+
+ {
+ ID: ErrInvalidRevocationReason,
+ Summary: "x509: failed to parse revocation reason: %v",
+ Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason",
+ SpecRef: "RFC 5280 s5.3.1",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrTrailingRevocationReason,
+ Summary: "x509: trailing data after revoked certificate reason",
+ Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason",
+ SpecRef: "RFC 5280 s5.3.1",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrInvalidRevocationInvalidityDate,
+ Summary: "x509: failed to parse revoked certificate invalidity date: %v",
+ Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate",
+ SpecRef: "RFC 5280 s5.3.2",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrTrailingRevocationInvalidityDate,
+ Summary: "x509: trailing data after revoked certificate invalidity date",
+ Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate",
+ SpecRef: "RFC 5280 s5.3.2",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrInvalidRevocationIssuer,
+ Summary: "x509: failed to parse revocation issuer %v",
+ Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CertificateIssuer",
+ SpecRef: "RFC 5280 s5.3.3",
+ Category: InvalidASN1Content,
+ Fatal: true,
+ },
+ {
+ ID: ErrUnhandledCriticalRevokedCertExtension,
+ Summary: "x509: unhandled critical extension in revoked certificate: %v",
+ Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*",
+ SpecRef: "RFC 5280 s5.3",
+ SpecText: "If a CRL contains a critical CRL entry extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of any certificates.",
+ Category: MalformedCRL,
+ Fatal: true,
+ },
+}
+
+func init() {
+ idToError = make(map[ErrorID]Error, len(errorInfo))
+ for _, info := range errorInfo {
+ idToError[info.ID] = info
+ }
+}
+
+// NewError builds a new x509.Error based on the template for the given id.
+func NewError(id ErrorID, args ...interface{}) Error {
+ var err Error
+ if id >= ErrMaxID {
+ err.ID = id
+ err.Summary = fmt.Sprintf("Unknown error ID %v: args %+v", id, args)
+ err.Fatal = true
+ } else {
+ err = idToError[id]
+ err.Summary = fmt.Sprintf(err.Summary, args...)
+ }
+ return err
+}
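+
+// As an illustrative example, NewError(ErrTrailingCertList) copies the
+// template for ID 2, so its Error() string is
+// "E002: x509: trailing data after CertificateList" (the leading 'E' marks a
+// fatal error, 'W' a warning).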
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/names.go b/vendor/github.com/google/certificate-transparency-go/x509/names.go
new file mode 100644
index 00000000000..4829edeb04b
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/names.go
@@ -0,0 +1,165 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/google/certificate-transparency-go/asn1"
+ "github.com/google/certificate-transparency-go/x509/pkix"
+)
+
+const (
+ // GeneralName tag values from RFC 5280, 4.2.1.6
+ tagOtherName = 0
+ tagRFC822Name = 1
+ tagDNSName = 2
+ tagX400Address = 3
+ tagDirectoryName = 4
+ tagEDIPartyName = 5
+ tagURI = 6
+ tagIPAddress = 7
+ tagRegisteredID = 8
+)
+
+// OtherName describes a name related to a certificate which is not in one
+// of the standard name formats. RFC 5280, 4.2.1.6:
+//
+// OtherName ::= SEQUENCE {
+// type-id OBJECT IDENTIFIER,
+// value [0] EXPLICIT ANY DEFINED BY type-id }
+type OtherName struct {
+ TypeID asn1.ObjectIdentifier
+ Value asn1.RawValue
+}
+
+// GeneralNames holds a collection of names related to a certificate.
+type GeneralNames struct {
+ DNSNames []string
+ EmailAddresses []string
+ DirectoryNames []pkix.Name
+ URIs []string
+ IPNets []net.IPNet
+ RegisteredIDs []asn1.ObjectIdentifier
+ OtherNames []OtherName
+}
+
+// Len returns the total number of names in a GeneralNames object.
+func (gn GeneralNames) Len() int {
+ return (len(gn.DNSNames) + len(gn.EmailAddresses) + len(gn.DirectoryNames) +
+ len(gn.URIs) + len(gn.IPNets) + len(gn.RegisteredIDs) + len(gn.OtherNames))
+}
+
+// Empty indicates whether a GeneralNames object is empty.
+func (gn GeneralNames) Empty() bool {
+ return gn.Len() == 0
+}
+
+func parseGeneralNames(value []byte, gname *GeneralNames) error {
+ // RFC 5280, 4.2.1.6
+ // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
+ //
+ // GeneralName ::= CHOICE {
+ // otherName [0] OtherName,
+ // rfc822Name [1] IA5String,
+ // dNSName [2] IA5String,
+ // x400Address [3] ORAddress,
+ // directoryName [4] Name,
+ // ediPartyName [5] EDIPartyName,
+ // uniformResourceIdentifier [6] IA5String,
+ // iPAddress [7] OCTET STRING,
+ // registeredID [8] OBJECT IDENTIFIER }
+ var seq asn1.RawValue
+ var rest []byte
+ if rest, err := asn1.Unmarshal(value, &seq); err != nil {
+ return fmt.Errorf("x509: failed to parse GeneralNames: %v", err)
+ } else if len(rest) != 0 {
+ return fmt.Errorf("x509: trailing data after GeneralNames")
+ }
+ if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal {
+ return fmt.Errorf("x509: failed to parse GeneralNames sequence, tag %+v", seq)
+ }
+
+ rest = seq.Bytes
+ for len(rest) > 0 {
+ var err error
+ rest, err = parseGeneralName(rest, gname, false)
+ if err != nil {
+ return fmt.Errorf("x509: failed to parse GeneralName: %v", err)
+ }
+ }
+ return nil
+}
+
+func parseGeneralName(data []byte, gname *GeneralNames, withMask bool) ([]byte, error) {
+ var v asn1.RawValue
+ var rest []byte
+ var err error
+ rest, err = asn1.Unmarshal(data, &v)
+ if err != nil {
+ return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames: %v", err)
+ }
+ switch v.Tag {
+ case tagOtherName:
+ if !v.IsCompound {
+ return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: not compound")
+ }
+ var other OtherName
+ v.FullBytes = append([]byte{}, v.FullBytes...)
+ v.FullBytes[0] = asn1.TagSequence | 0x20
+ _, err = asn1.Unmarshal(v.FullBytes, &other)
+ if err != nil {
+ return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: %v", err)
+ }
+ gname.OtherNames = append(gname.OtherNames, other)
+ case tagRFC822Name:
+ gname.EmailAddresses = append(gname.EmailAddresses, string(v.Bytes))
+ case tagDNSName:
+ dns := string(v.Bytes)
+ gname.DNSNames = append(gname.DNSNames, dns)
+ case tagDirectoryName:
+ var rdnSeq pkix.RDNSequence
+ if _, err := asn1.Unmarshal(v.Bytes, &rdnSeq); err != nil {
+ return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.directoryName: %v", err)
+ }
+ var dirName pkix.Name
+ dirName.FillFromRDNSequence(&rdnSeq)
+ gname.DirectoryNames = append(gname.DirectoryNames, dirName)
+ case tagURI:
+ gname.URIs = append(gname.URIs, string(v.Bytes))
+ case tagIPAddress:
+ vlen := len(v.Bytes)
+ if withMask {
+ switch vlen {
+ case (2 * net.IPv4len), (2 * net.IPv6len):
+ ipNet := net.IPNet{IP: v.Bytes[0 : vlen/2], Mask: v.Bytes[vlen/2:]}
+ gname.IPNets = append(gname.IPNets, ipNet)
+ default:
+ return nil, fmt.Errorf("x509: invalid IP/mask length %d in GeneralNames.iPAddress", vlen)
+ }
+ } else {
+ switch vlen {
+ case net.IPv4len, net.IPv6len:
+ ipNet := net.IPNet{IP: v.Bytes}
+ gname.IPNets = append(gname.IPNets, ipNet)
+ default:
+ return nil, fmt.Errorf("x509: invalid IP length %d in GeneralNames.iPAddress", vlen)
+ }
+ }
+ case tagRegisteredID:
+ var oid asn1.ObjectIdentifier
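+ // Re-tag the context-specific [8] value as a universal OBJECT
+ // IDENTIFIER so asn1.Unmarshal accepts it.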
+ v.FullBytes = append([]byte{}, v.FullBytes...)
+ v.FullBytes[0] = asn1.TagOID
+ _, err = asn1.Unmarshal(v.FullBytes, &oid)
+ if err != nil {
+ return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.registeredID: %v", err)
+ }
+ gname.RegisteredIDs = append(gname.RegisteredIDs, oid)
+ default:
+ return nil, fmt.Errorf("x509: failed to unmarshal GeneralName: unknown tag %d", v.Tag)
+ }
+ return rest, nil
+}
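+
+// Editorial sketch (not part of the upstream file): parseGeneralNames
+// consumes a DER-encoded GeneralNames SEQUENCE. For example, a SEQUENCE
+// holding the single dNSName "example.com" would be handled as:
+//
+//	der := []byte{
+//		0x30, 0x0d, // SEQUENCE, length 13
+//		0x82, 0x0b, // [2] dNSName, length 11
+//		'e', 'x', 'a', 'm', 'p', 'l', 'e', '.', 'c', 'o', 'm',
+//	}
+//	var gn GeneralNames
+//	if err := parseGeneralNames(der, &gn); err != nil {
+//		// handle parse error
+//	}
+//	// gn.DNSNames == []string{"example.com"}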
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go b/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go
new file mode 100644
index 00000000000..93d1e4a922d
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+// RFC 1423 describes the encryption of PEM blocks. The algorithm used to
+// generate a key from the password was derived by looking at the OpenSSL
+// implementation.
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/des"
+ "crypto/md5"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "io"
+ "strings"
+)
+
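+// PEMCipher identifies an encryption algorithm used with EncryptPEMBlock.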
+type PEMCipher int
+
+// Possible values for the EncryptPEMBlock encryption algorithm.
+const (
+ _ PEMCipher = iota
+ PEMCipherDES
+ PEMCipher3DES
+ PEMCipherAES128
+ PEMCipherAES192
+ PEMCipherAES256
+)
+
+// rfc1423Algo holds a method for enciphering a PEM block.
+type rfc1423Algo struct {
+ cipher PEMCipher
+ name string
+ cipherFunc func(key []byte) (cipher.Block, error)
+ keySize int
+ blockSize int
+}
+
+// rfc1423Algos holds a slice of the possible ways to encrypt a PEM
+// block. The blockSize numbers were taken from the OpenSSL source.
+var rfc1423Algos = []rfc1423Algo{{
+ cipher: PEMCipherDES,
+ name: "DES-CBC",
+ cipherFunc: des.NewCipher,
+ keySize: 8,
+ blockSize: des.BlockSize,
+}, {
+ cipher: PEMCipher3DES,
+ name: "DES-EDE3-CBC",
+ cipherFunc: des.NewTripleDESCipher,
+ keySize: 24,
+ blockSize: des.BlockSize,
+}, {
+ cipher: PEMCipherAES128,
+ name: "AES-128-CBC",
+ cipherFunc: aes.NewCipher,
+ keySize: 16,
+ blockSize: aes.BlockSize,
+}, {
+ cipher: PEMCipherAES192,
+ name: "AES-192-CBC",
+ cipherFunc: aes.NewCipher,
+ keySize: 24,
+ blockSize: aes.BlockSize,
+}, {
+ cipher: PEMCipherAES256,
+ name: "AES-256-CBC",
+ cipherFunc: aes.NewCipher,
+ keySize: 32,
+ blockSize: aes.BlockSize,
+},
+}
+
+// deriveKey uses a key derivation function to stretch the password into a key
+// with the number of bits our cipher requires. This algorithm was derived from
+// the OpenSSL source.
+func (c rfc1423Algo) deriveKey(password, salt []byte) []byte {
+ hash := md5.New()
+ out := make([]byte, c.keySize)
+ var digest []byte
+
+ for i := 0; i < len(out); i += len(digest) {
+ hash.Reset()
+ hash.Write(digest)
+ hash.Write(password)
+ hash.Write(salt)
+ digest = hash.Sum(digest[:0])
+ copy(out[i:], digest)
+ }
+ return out
+}
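+
+// Editorial sketch (not part of the upstream file): for keySize 32 the loop
+// above concatenates chained MD5 digests, so with pw and salt standing for
+// the password and the first 8 IV bytes it is equivalent to:
+//
+//	h := md5.New()
+//	h.Write(pw)
+//	h.Write(salt)
+//	d1 := h.Sum(nil) // MD5(pw || salt)
+//	h.Reset()
+//	h.Write(d1)
+//	h.Write(pw)
+//	h.Write(salt)
+//	d2 := h.Sum(nil) // MD5(d1 || pw || salt)
+//	key := append(d1, d2...) // 32 bytes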
+
+// IsEncryptedPEMBlock reports whether the PEM block is password encrypted.
+func IsEncryptedPEMBlock(b *pem.Block) bool {
+ _, ok := b.Headers["DEK-Info"]
+ return ok
+}
+
+// IncorrectPasswordError is returned when an incorrect password is detected.
+var IncorrectPasswordError = errors.New("x509: decryption password incorrect")
+
+// DecryptPEMBlock takes a password encrypted PEM block and the password used to
+// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects
+// the DEK-Info header to determine the algorithm used for decryption. If no
+// DEK-Info header is present, an error is returned. If an incorrect password
+// is detected an IncorrectPasswordError is returned. Because of deficiencies
+// in the encrypted-PEM format, it's not always possible to detect an incorrect
+// password. In these cases no error will be returned but the decrypted DER
+// bytes will be random noise.
+func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) {
+ dek, ok := b.Headers["DEK-Info"]
+ if !ok {
+ return nil, errors.New("x509: no DEK-Info header in block")
+ }
+
+ idx := strings.Index(dek, ",")
+ if idx == -1 {
+ return nil, errors.New("x509: malformed DEK-Info header")
+ }
+
+ mode, hexIV := dek[:idx], dek[idx+1:]
+ ciph := cipherByName(mode)
+ if ciph == nil {
+ return nil, errors.New("x509: unknown encryption mode")
+ }
+ iv, err := hex.DecodeString(hexIV)
+ if err != nil {
+ return nil, err
+ }
+ if len(iv) != ciph.blockSize {
+ return nil, errors.New("x509: incorrect IV size")
+ }
+
+ // Based on the OpenSSL implementation. The salt is the first 8 bytes
+ // of the initialization vector.
+ key := ciph.deriveKey(password, iv[:8])
+ block, err := ciph.cipherFunc(key)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b.Bytes)%block.BlockSize() != 0 {
+ return nil, errors.New("x509: encrypted PEM data is not a multiple of the block size")
+ }
+
+ data := make([]byte, len(b.Bytes))
+ dec := cipher.NewCBCDecrypter(block, iv)
+ dec.CryptBlocks(data, b.Bytes)
+
+ // Blocks are padded using a scheme where the last n bytes of padding are all
+ // equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423.
+ // For example:
+ // [x y z 2 2]
+ // [x y 7 7 7 7 7 7 7]
+ // If we detect a bad padding, we assume it is an invalid password.
+ dlen := len(data)
+ if dlen == 0 || dlen%ciph.blockSize != 0 {
+ return nil, errors.New("x509: invalid padding")
+ }
+ last := int(data[dlen-1])
+ if dlen < last {
+ return nil, IncorrectPasswordError
+ }
+ if last == 0 || last > ciph.blockSize {
+ return nil, IncorrectPasswordError
+ }
+ for _, val := range data[dlen-last:] {
+ if int(val) != last {
+ return nil, IncorrectPasswordError
+ }
+ }
+ return data[:dlen-last], nil
+}
+
+// EncryptPEMBlock returns a PEM block of the specified type holding the
+// given DER-encoded data encrypted with the specified algorithm and
+// password.
+func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) {
+ ciph := cipherByKey(alg)
+ if ciph == nil {
+ return nil, errors.New("x509: unknown encryption mode")
+ }
+ iv := make([]byte, ciph.blockSize)
+ if _, err := io.ReadFull(rand, iv); err != nil {
+ return nil, errors.New("x509: cannot generate IV: " + err.Error())
+ }
+ // The salt is the first 8 bytes of the initialization vector,
+ // matching the key derivation in DecryptPEMBlock.
+ key := ciph.deriveKey(password, iv[:8])
+ block, err := ciph.cipherFunc(key)
+ if err != nil {
+ return nil, err
+ }
+ enc := cipher.NewCBCEncrypter(block, iv)
+ pad := ciph.blockSize - len(data)%ciph.blockSize
+ encrypted := make([]byte, len(data), len(data)+pad)
+ // We could save this copy by encrypting all the whole blocks in
+ // the data separately, but it doesn't seem worth the additional
+ // code.
+ copy(encrypted, data)
+ // See RFC 1423, Section 1.1.
+ for i := 0; i < pad; i++ {
+ encrypted = append(encrypted, byte(pad))
+ }
+ enc.CryptBlocks(encrypted, encrypted)
+
+ return &pem.Block{
+ Type: blockType,
+ Headers: map[string]string{
+ "Proc-Type": "4,ENCRYPTED",
+ "DEK-Info": ciph.name + "," + hex.EncodeToString(iv),
+ },
+ Bytes: encrypted,
+ }, nil
+}
+
+func cipherByName(name string) *rfc1423Algo {
+ for i := range rfc1423Algos {
+ alg := &rfc1423Algos[i]
+ if alg.name == name {
+ return alg
+ }
+ }
+ return nil
+}
+
+func cipherByKey(key PEMCipher) *rfc1423Algo {
+ for i := range rfc1423Algos {
+ alg := &rfc1423Algos[i]
+ if alg.cipher == key {
+ return alg
+ }
+ }
+ return nil
+}
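+
+// Editorial usage sketch (not part of the upstream file): round-tripping
+// DER bytes through the helpers above, assuming derBytes holds a marshaled
+// key and crypto/rand is imported:
+//
+//	block, err := EncryptPEMBlock(rand.Reader, "RSA PRIVATE KEY",
+//		derBytes, []byte("password"), PEMCipherAES256)
+//	if err != nil {
+//		// handle error
+//	}
+//	// IsEncryptedPEMBlock(block) == true
+//	plain, err := DecryptPEMBlock(block, []byte("password"))
+//	// plain equals derBytes; a wrong password usually (but not always)
+//	// yields IncorrectPasswordError.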
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go b/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go
new file mode 100644
index 00000000000..bea05b57fd8
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go
@@ -0,0 +1,174 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "crypto/rsa"
+ "errors"
+ "math/big"
+
+ "github.com/google/certificate-transparency-go/asn1"
+)
+
+// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key.
+type pkcs1PrivateKey struct {
+ Version int
+ N *big.Int
+ E int
+ D *big.Int
+ P *big.Int
+ Q *big.Int
+ // We ignore these values, if present, because rsa will calculate them.
+ Dp *big.Int `asn1:"optional"`
+ Dq *big.Int `asn1:"optional"`
+ Qinv *big.Int `asn1:"optional"`
+
+ AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"`
+}
+
+type pkcs1AdditionalRSAPrime struct {
+ Prime *big.Int
+
+ // We ignore these values because rsa will calculate them.
+ Exp *big.Int
+ Coeff *big.Int
+}
+
+// pkcs1PublicKey reflects the ASN.1 structure of a PKCS#1 public key.
+type pkcs1PublicKey struct {
+ N *big.Int
+ E int
+}
+
+// ParsePKCS1PrivateKey parses an RSA private key in PKCS#1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY".
+func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) {
+ var priv pkcs1PrivateKey
+ rest, err := asn1.Unmarshal(der, &priv)
+ if len(rest) > 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+ if err != nil {
+ if _, err := asn1.Unmarshal(der, &ecPrivateKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParseECPrivateKey instead for this key format)")
+ }
+ if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)")
+ }
+ return nil, err
+ }
+
+ if priv.Version > 1 {
+ return nil, errors.New("x509: unsupported private key version")
+ }
+
+ if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 {
+ return nil, errors.New("x509: private key contains zero or negative value")
+ }
+
+ key := new(rsa.PrivateKey)
+ key.PublicKey = rsa.PublicKey{
+ E: priv.E,
+ N: priv.N,
+ }
+
+ key.D = priv.D
+ key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes))
+ key.Primes[0] = priv.P
+ key.Primes[1] = priv.Q
+ for i, a := range priv.AdditionalPrimes {
+ if a.Prime.Sign() <= 0 {
+ return nil, errors.New("x509: private key contains zero or negative prime")
+ }
+ key.Primes[i+2] = a.Prime
+ // We ignore the other two values because rsa will calculate
+ // them as needed.
+ }
+
+ err = key.Validate()
+ if err != nil {
+ return nil, err
+ }
+ key.Precompute()
+
+ return key, nil
+}
+
+// MarshalPKCS1PrivateKey converts an RSA private key to PKCS#1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "RSA PRIVATE KEY".
+// For a more flexible key format which is not RSA specific, use
+// MarshalPKCS8PrivateKey.
+func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
+ key.Precompute()
+
+ version := 0
+ if len(key.Primes) > 2 {
+ version = 1
+ }
+
+ priv := pkcs1PrivateKey{
+ Version: version,
+ N: key.N,
+ E: key.PublicKey.E,
+ D: key.D,
+ P: key.Primes[0],
+ Q: key.Primes[1],
+ Dp: key.Precomputed.Dp,
+ Dq: key.Precomputed.Dq,
+ Qinv: key.Precomputed.Qinv,
+ }
+
+ priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues))
+ for i, values := range key.Precomputed.CRTValues {
+ priv.AdditionalPrimes[i].Prime = key.Primes[2+i]
+ priv.AdditionalPrimes[i].Exp = values.Exp
+ priv.AdditionalPrimes[i].Coeff = values.Coeff
+ }
+
+ b, _ := asn1.Marshal(priv)
+ return b
+}
+
+// ParsePKCS1PublicKey parses an RSA public key in PKCS#1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY".
+func ParsePKCS1PublicKey(der []byte) (*rsa.PublicKey, error) {
+ var pub pkcs1PublicKey
+ rest, err := asn1.Unmarshal(der, &pub)
+ if err != nil {
+ if _, err := asn1.Unmarshal(der, &publicKeyInfo{}); err == nil {
+ return nil, errors.New("x509: failed to parse public key (use ParsePKIXPublicKey instead for this key format)")
+ }
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+
+ if pub.N.Sign() <= 0 || pub.E <= 0 {
+ return nil, errors.New("x509: public key contains zero or negative value")
+ }
+ if pub.E > 1<<31-1 {
+ return nil, errors.New("x509: public key contains large public exponent")
+ }
+
+ return &rsa.PublicKey{
+ E: pub.E,
+ N: pub.N,
+ }, nil
+}
+
+// MarshalPKCS1PublicKey converts an RSA public key to PKCS#1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "RSA PUBLIC KEY".
+func MarshalPKCS1PublicKey(key *rsa.PublicKey) []byte {
+ derBytes, _ := asn1.Marshal(pkcs1PublicKey{
+ N: key.N,
+ E: key.E,
+ })
+ return derBytes
+}
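+
+// Editorial usage sketch (not part of the upstream file): a PKCS#1 round
+// trip, assuming crypto/rand and crypto/rsa are imported:
+//
+//	key, err := rsa.GenerateKey(rand.Reader, 2048)
+//	if err != nil {
+//		// handle error
+//	}
+//	der := MarshalPKCS1PrivateKey(key)
+//	parsed, err := ParsePKCS1PrivateKey(der)
+//	// parsed.N.Cmp(key.N) == 0 and parsed.E == key.E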
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go b/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go
new file mode 100644
index 00000000000..a144eb6a5d4
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go
@@ -0,0 +1,139 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+
+ "github.com/google/certificate-transparency-go/asn1"
+ "github.com/google/certificate-transparency-go/x509/pkix"
+
+ // TODO(robpercival): change this to crypto/ed25519 when Go 1.13 is min version
+ "golang.org/x/crypto/ed25519"
+)
+
+// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See
+// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn
+// and RFC 5208.
+type pkcs8 struct {
+ Version int
+ Algo pkix.AlgorithmIdentifier
+ PrivateKey []byte
+ // optional attributes omitted.
+}
+
+// ParsePKCS8PrivateKey parses an unencrypted private key in PKCS#8, ASN.1 DER form.
+//
+// It returns a *rsa.PrivateKey, a *ecdsa.PrivateKey, or an ed25519.PrivateKey.
+// More types might be supported in the future.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY".
+func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) {
+ var privKey pkcs8
+ if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+ if _, err := asn1.Unmarshal(der, &ecPrivateKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParseECPrivateKey instead for this key format)")
+ }
+ if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)")
+ }
+ return nil, err
+ }
+ switch {
+ case privKey.Algo.Algorithm.Equal(OIDPublicKeyRSA):
+ key, err = ParsePKCS1PrivateKey(privKey.PrivateKey)
+ if err != nil {
+ return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error())
+ }
+ return key, nil
+
+ case privKey.Algo.Algorithm.Equal(OIDPublicKeyECDSA):
+ bytes := privKey.Algo.Parameters.FullBytes
+ namedCurveOID := new(asn1.ObjectIdentifier)
+ if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil {
+ namedCurveOID = nil
+ }
+ key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey)
+ if err != nil {
+ return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error())
+ }
+ return key, nil
+
+ case privKey.Algo.Algorithm.Equal(OIDPublicKeyEd25519):
+ if l := len(privKey.Algo.Parameters.FullBytes); l != 0 {
+ return nil, errors.New("x509: invalid Ed25519 private key parameters")
+ }
+ var curvePrivateKey []byte
+ if _, err := asn1.Unmarshal(privKey.PrivateKey, &curvePrivateKey); err != nil {
+ return nil, fmt.Errorf("x509: invalid Ed25519 private key: %v", err)
+ }
+ if l := len(curvePrivateKey); l != ed25519.SeedSize {
+ return nil, fmt.Errorf("x509: invalid Ed25519 private key length: %d", l)
+ }
+ return ed25519.NewKeyFromSeed(curvePrivateKey), nil
+
+ default:
+ return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm)
+ }
+}
+
+// MarshalPKCS8PrivateKey converts a private key to PKCS#8, ASN.1 DER form.
+//
+// The following key types are currently supported: *rsa.PrivateKey, *ecdsa.PrivateKey
+// and ed25519.PrivateKey. Unsupported key types result in an error.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY".
+func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) {
+ var privKey pkcs8
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ privKey.Algo = pkix.AlgorithmIdentifier{
+ Algorithm: OIDPublicKeyRSA,
+ Parameters: asn1.NullRawValue,
+ }
+ privKey.PrivateKey = MarshalPKCS1PrivateKey(k)
+
+ case *ecdsa.PrivateKey:
+ oid, ok := OIDFromNamedCurve(k.Curve)
+ if !ok {
+ return nil, errors.New("x509: unknown curve while marshaling to PKCS#8")
+ }
+
+ oidBytes, err := asn1.Marshal(oid)
+ if err != nil {
+ return nil, errors.New("x509: failed to marshal curve OID: " + err.Error())
+ }
+
+ privKey.Algo = pkix.AlgorithmIdentifier{
+ Algorithm: OIDPublicKeyECDSA,
+ Parameters: asn1.RawValue{
+ FullBytes: oidBytes,
+ },
+ }
+
+ if privKey.PrivateKey, err = marshalECPrivateKeyWithOID(k, nil); err != nil {
+ return nil, errors.New("x509: failed to marshal EC private key while building PKCS#8: " + err.Error())
+ }
+
+ case ed25519.PrivateKey:
+ privKey.Algo = pkix.AlgorithmIdentifier{
+ Algorithm: OIDPublicKeyEd25519,
+ }
+ curvePrivateKey, err := asn1.Marshal(k.Seed())
+ if err != nil {
+ return nil, fmt.Errorf("x509: failed to marshal private key: %v", err)
+ }
+ privKey.PrivateKey = curvePrivateKey
+
+ default:
+ return nil, fmt.Errorf("x509: unknown key type while marshaling PKCS#8: %T", key)
+ }
+
+ return asn1.Marshal(privKey)
+}
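+
+// Editorial usage sketch (not part of the upstream file): wrapping an ECDSA
+// key in PKCS#8 and recovering it, assuming crypto/ecdsa, crypto/elliptic
+// and crypto/rand are imported:
+//
+//	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	if err != nil {
+//		// handle error
+//	}
+//	der, err := MarshalPKCS8PrivateKey(key)
+//	if err != nil {
+//		// handle error
+//	}
+//	parsed, err := ParsePKCS8PrivateKey(der)
+//	ecKey, ok := parsed.(*ecdsa.PrivateKey) // ok == true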
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go b/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go
new file mode 100644
index 00000000000..1716f908abc
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go
@@ -0,0 +1,287 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkix contains shared, low-level structures used for ASN.1 parsing
+// and serialization of X.509 certificates, CRLs and OCSP.
+package pkix
+
+import (
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/google/certificate-transparency-go/asn1"
+)
+
+// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.1.1.2.
+type AlgorithmIdentifier struct {
+ Raw asn1.RawContent
+ Algorithm asn1.ObjectIdentifier
+ Parameters asn1.RawValue `asn1:"optional"`
+}
+
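+// RDNSequence represents the ASN.1 RDNSequence from RFC 5280, Section
+// 4.1.2.4: a sequence of relative distinguished name SETs.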
+type RDNSequence []RelativeDistinguishedNameSET
+
+var attributeTypeNames = map[string]string{
+ "2.5.4.6": "C",
+ "2.5.4.10": "O",
+ "2.5.4.11": "OU",
+ "2.5.4.3": "CN",
+ "2.5.4.5": "SERIALNUMBER",
+ "2.5.4.7": "L",
+ "2.5.4.8": "ST",
+ "2.5.4.9": "STREET",
+ "2.5.4.17": "POSTALCODE",
+}
+
+// String returns a string representation of the sequence r,
+// roughly following the RFC 2253 Distinguished Names syntax.
+func (r RDNSequence) String() string {
+ s := ""
+ for i := 0; i < len(r); i++ {
+ rdn := r[len(r)-1-i]
+ if i > 0 {
+ s += ","
+ }
+ for j, tv := range rdn {
+ if j > 0 {
+ s += "+"
+ }
+
+ oidString := tv.Type.String()
+ typeName, ok := attributeTypeNames[oidString]
+ if !ok {
+ derBytes, err := asn1.Marshal(tv.Value)
+ if err == nil {
+ s += oidString + "=#" + hex.EncodeToString(derBytes)
+ continue // No value escaping necessary.
+ }
+
+ typeName = oidString
+ }
+
+ valueString := fmt.Sprint(tv.Value)
+ escaped := make([]rune, 0, len(valueString))
+
+ for k, c := range valueString {
+ escape := false
+
+ switch c {
+ case ',', '+', '"', '\\', '<', '>', ';':
+ escape = true
+
+ case ' ':
+ escape = k == 0 || k == len(valueString)-1
+
+ case '#':
+ escape = k == 0
+ }
+
+ if escape {
+ escaped = append(escaped, '\\', c)
+ } else {
+ escaped = append(escaped, c)
+ }
+ }
+
+ s += typeName + "=" + string(escaped)
+ }
+ }
+
+ return s
+}
+
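+// RelativeDistinguishedNameSET is one SET within an RDNSequence, holding
+// one or more AttributeTypeAndValue entries.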
+type RelativeDistinguishedNameSET []AttributeTypeAndValue
+
+// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
+// RFC 5280, Section 4.1.2.4.
+type AttributeTypeAndValue struct {
+ Type asn1.ObjectIdentifier
+ Value interface{}
+}
+
+// AttributeTypeAndValueSET represents a set of ASN.1 sequences of
+// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10).
+type AttributeTypeAndValueSET struct {
+ Type asn1.ObjectIdentifier
+ Value [][]AttributeTypeAndValue `asn1:"set"`
+}
+
+// Extension represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.2.
+type Extension struct {
+ Id asn1.ObjectIdentifier
+ Critical bool `asn1:"optional"`
+ Value []byte
+}
+
+// Name represents an X.509 distinguished name. This only includes the common
+// elements of a DN. When parsing, all elements are stored in Names and
+// non-standard elements can be extracted from there. When marshaling, elements
+// in ExtraNames are appended and override other values with the same OID.
+type Name struct {
+ Country, Organization, OrganizationalUnit []string
+ Locality, Province []string
+ StreetAddress, PostalCode []string
+ SerialNumber, CommonName string
+
+ Names []AttributeTypeAndValue
+ ExtraNames []AttributeTypeAndValue
+}
+
+func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
+ for _, rdn := range *rdns {
+ if len(rdn) == 0 {
+ continue
+ }
+
+ for _, atv := range rdn {
+ n.Names = append(n.Names, atv)
+ value, ok := atv.Value.(string)
+ if !ok {
+ continue
+ }
+
+ t := atv.Type
+ if len(t) == 4 && t[0] == OIDAttribute[0] && t[1] == OIDAttribute[1] && t[2] == OIDAttribute[2] {
+ switch t[3] {
+ case OIDCommonName[3]:
+ n.CommonName = value
+ case OIDSerialNumber[3]:
+ n.SerialNumber = value
+ case OIDCountry[3]:
+ n.Country = append(n.Country, value)
+ case OIDLocality[3]:
+ n.Locality = append(n.Locality, value)
+ case OIDProvince[3]:
+ n.Province = append(n.Province, value)
+ case OIDStreetAddress[3]:
+ n.StreetAddress = append(n.StreetAddress, value)
+ case OIDOrganization[3]:
+ n.Organization = append(n.Organization, value)
+ case OIDOrganizationalUnit[3]:
+ n.OrganizationalUnit = append(n.OrganizationalUnit, value)
+ case OIDPostalCode[3]:
+ n.PostalCode = append(n.PostalCode, value)
+ }
+ }
+ }
+ }
+}
+
+var (
+ OIDAttribute = asn1.ObjectIdentifier{2, 5, 4}
+ OIDCountry = asn1.ObjectIdentifier{2, 5, 4, 6}
+ OIDOrganization = asn1.ObjectIdentifier{2, 5, 4, 10}
+ OIDOrganizationalUnit = asn1.ObjectIdentifier{2, 5, 4, 11}
+ OIDCommonName = asn1.ObjectIdentifier{2, 5, 4, 3}
+ OIDSerialNumber = asn1.ObjectIdentifier{2, 5, 4, 5}
+ OIDLocality = asn1.ObjectIdentifier{2, 5, 4, 7}
+ OIDProvince = asn1.ObjectIdentifier{2, 5, 4, 8}
+ OIDStreetAddress = asn1.ObjectIdentifier{2, 5, 4, 9}
+ OIDPostalCode = asn1.ObjectIdentifier{2, 5, 4, 17}
+
+ OIDPseudonym = asn1.ObjectIdentifier{2, 5, 4, 65}
+ OIDTitle = asn1.ObjectIdentifier{2, 5, 4, 12}
+ OIDDnQualifier = asn1.ObjectIdentifier{2, 5, 4, 46}
+ OIDName = asn1.ObjectIdentifier{2, 5, 4, 41}
+ OIDSurname = asn1.ObjectIdentifier{2, 5, 4, 4}
+ OIDGivenName = asn1.ObjectIdentifier{2, 5, 4, 42}
+ OIDInitials = asn1.ObjectIdentifier{2, 5, 4, 43}
+ OIDGenerationQualifier = asn1.ObjectIdentifier{2, 5, 4, 44}
+)
+
+// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
+// and returns the new value. The relativeDistinguishedNameSET contains an
+// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
+// search for AttributeTypeAndValue.
+func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
+ if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) {
+ return in
+ }
+
+ s := make([]AttributeTypeAndValue, len(values))
+ for i, value := range values {
+ s[i].Type = oid
+ s[i].Value = value
+ }
+
+ return append(in, s)
+}
+
+func (n Name) ToRDNSequence() (ret RDNSequence) {
+ ret = n.appendRDNs(ret, n.Country, OIDCountry)
+ ret = n.appendRDNs(ret, n.Province, OIDProvince)
+ ret = n.appendRDNs(ret, n.Locality, OIDLocality)
+ ret = n.appendRDNs(ret, n.StreetAddress, OIDStreetAddress)
+ ret = n.appendRDNs(ret, n.PostalCode, OIDPostalCode)
+ ret = n.appendRDNs(ret, n.Organization, OIDOrganization)
+ ret = n.appendRDNs(ret, n.OrganizationalUnit, OIDOrganizationalUnit)
+ if len(n.CommonName) > 0 {
+ ret = n.appendRDNs(ret, []string{n.CommonName}, OIDCommonName)
+ }
+ if len(n.SerialNumber) > 0 {
+ ret = n.appendRDNs(ret, []string{n.SerialNumber}, OIDSerialNumber)
+ }
+ for _, atv := range n.ExtraNames {
+ ret = append(ret, []AttributeTypeAndValue{atv})
+ }
+
+ return ret
+}
+
+// String returns the string form of n, roughly following
+// the RFC 2253 Distinguished Names syntax.
+func (n Name) String() string {
+ return n.ToRDNSequence().String()
+}
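+
+// Editorial usage sketch (not part of the upstream file): ToRDNSequence
+// appends C before O before CN, and RDNSequence.String prints in reverse,
+// so:
+//
+//	n := Name{
+//		CommonName:   "example.com",
+//		Organization: []string{"Example Org"},
+//		Country:      []string{"GB"},
+//	}
+//	fmt.Println(n.String()) // CN=example.com,O=Example Org,C=GB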
+
+// oidInAttributeTypeAndValue reports whether a type with the given OID exists
+// in atv.
+func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool {
+ for _, a := range atv {
+ if a.Type.Equal(oid) {
+ return true
+ }
+ }
+ return false
+}
+
+// CertificateList represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
+// signature.
+type CertificateList struct {
+ TBSCertList TBSCertificateList
+ SignatureAlgorithm AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+// HasExpired reports whether certList should have been updated by now.
+func (certList *CertificateList) HasExpired(now time.Time) bool {
+ return !now.Before(certList.TBSCertList.NextUpdate)
+}
+
+// TBSCertificateList represents the ASN.1 structure TBSCertList. See RFC
+// 5280, section 5.1.
+type TBSCertificateList struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,default:0"`
+ Signature AlgorithmIdentifier
+ Issuer RDNSequence
+ ThisUpdate time.Time
+ NextUpdate time.Time `asn1:"optional"`
+ RevokedCertificates []RevokedCertificate `asn1:"optional"`
+ Extensions []Extension `asn1:"tag:0,optional,explicit"`
+}
+
+// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
+// revokedCertificates member of the TBSCertList structure. See RFC
+// 5280, section 5.1.
+type RevokedCertificate struct {
+ SerialNumber *big.Int
+ RevocationTime time.Time
+ Extensions []Extension `asn1:"optional"`
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go
new file mode 100644
index 00000000000..06fd439c1fb
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.11
+// +build go1.11
+
+package x509
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// For Go versions >= 1.11, the ExtraPolicyPara field in
+// syscall.CertChainPolicyPara is of type syscall.Pointer. See:
+// https://github.com/golang/go/commit/4869ec00e87ef
+
+func convertToPolicyParaType(p unsafe.Pointer) syscall.Pointer {
+ return (syscall.Pointer)(p)
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go
new file mode 100644
index 00000000000..f13a47adfbe
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go
@@ -0,0 +1,18 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.11
+// +build !go1.11
+
+package x509
+
+import "unsafe"
+
+// For Go versions before 1.11, the ExtraPolicyPara field in
+// syscall.CertChainPolicyPara was of type uintptr. See:
+// https://github.com/golang/go/commit/4869ec00e87ef
+
+func convertToPolicyParaType(p unsafe.Pointer) uintptr {
+ return uintptr(p)
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/revoked.go b/vendor/github.com/google/certificate-transparency-go/x509/revoked.go
new file mode 100644
index 00000000000..e5fa6dd15f5
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/revoked.go
@@ -0,0 +1,365 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "bytes"
+ "encoding/pem"
+ "time"
+
+ "github.com/google/certificate-transparency-go/asn1"
+ "github.com/google/certificate-transparency-go/x509/pkix"
+)
+
+// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
+var (
+ OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20}
+ OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27}
+ OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28}
+)
+
+// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
+var (
+ OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21}
+ OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24}
+ OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29}
+)
+
+// RevocationReasonCode represents the reason for a certificate revocation; see RFC 5280 s5.3.1.
+type RevocationReasonCode asn1.Enumerated
+
+// RevocationReasonCode values.
+var (
+ Unspecified = RevocationReasonCode(0)
+ KeyCompromise = RevocationReasonCode(1)
+ CACompromise = RevocationReasonCode(2)
+ AffiliationChanged = RevocationReasonCode(3)
+ Superseded = RevocationReasonCode(4)
+ CessationOfOperation = RevocationReasonCode(5)
+ CertificateHold = RevocationReasonCode(6)
+ RemoveFromCRL = RevocationReasonCode(8)
+ PrivilegeWithdrawn = RevocationReasonCode(9)
+ AACompromise = RevocationReasonCode(10)
+)
+
+// ReasonFlag holds a bitmask of applicable revocation reasons, from RFC 5280 s4.2.1.13
+type ReasonFlag int
+
+// ReasonFlag values.
+const (
+ UnusedFlag ReasonFlag = 1 << iota
+ KeyCompromiseFlag
+ CACompromiseFlag
+ AffiliationChangedFlag
+ SupersededFlag
+ CessationOfOperationFlag
+ CertificateHoldFlag
+ PrivilegeWithdrawnFlag
+ AACompromiseFlag
+)
+
+// CertificateList represents the ASN.1 structure of the same name from RFC 5280, s5.1.
+// It has the same content as pkix.CertificateList, but the contents include parsed versions
+// of any extensions.
+type CertificateList struct {
+ Raw asn1.RawContent
+ TBSCertList TBSCertList
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+// ExpiredAt reports whether now is past the expiry time of certList.
+func (certList *CertificateList) ExpiredAt(now time.Time) bool {
+ return now.After(certList.TBSCertList.NextUpdate)
+}
+
+// Indication of whether extensions need to be critical or non-critical. Extensions that
+// can be either are omitted from the map.
+var listExtCritical = map[string]bool{
+ // From RFC 5280...
+ OIDExtensionAuthorityKeyId.String(): false, // s5.2.1
+ OIDExtensionIssuerAltName.String(): false, // s5.2.2
+ OIDExtensionCRLNumber.String(): false, // s5.2.3
+ OIDExtensionDeltaCRLIndicator.String(): true, // s5.2.4
+ OIDExtensionIssuingDistributionPoint.String(): true, // s5.2.5
+ OIDExtensionFreshestCRL.String(): false, // s5.2.6
+ OIDExtensionAuthorityInfoAccess.String(): false, // s5.2.7
+}
+
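+// Indication of whether CRL entry extensions need to be critical or
+// non-critical. Extensions that can be either are omitted from the map.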
+var certExtCritical = map[string]bool{
+ // From RFC 5280...
+ OIDExtensionCRLReasons.String(): false, // s5.3.1
+ OIDExtensionInvalidityDate.String(): false, // s5.3.2
+ OIDExtensionCertificateIssuer.String(): true, // s5.3.3
+}
+
+// IssuingDistributionPoint represents the ASN.1 structure of the same
+// name; see RFC 5280, s5.2.5.
+type IssuingDistributionPoint struct {
+ DistributionPoint distributionPointName `asn1:"optional,tag:0"`
+ OnlyContainsUserCerts bool `asn1:"optional,tag:1"`
+ OnlyContainsCACerts bool `asn1:"optional,tag:2"`
+ OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"`
+ IndirectCRL bool `asn1:"optional,tag:4"`
+ OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"`
+}
+
+// TBSCertList represents the ASN.1 structure of the same name from RFC
+// 5280, section 5.1. It has the same content as pkix.TBSCertificateList
+// but the extensions are included in a parsed format.
+type TBSCertList struct {
+ Raw asn1.RawContent
+ Version int
+ Signature pkix.AlgorithmIdentifier
+ Issuer pkix.RDNSequence
+ ThisUpdate time.Time
+ NextUpdate time.Time
+ RevokedCertificates []*RevokedCertificate
+ Extensions []pkix.Extension
+ // Cracked out extensions:
+ AuthorityKeyID []byte
+ IssuerAltNames GeneralNames
+ CRLNumber int
+ BaseCRLNumber int // -1 if no delta CRL present
+ IssuingDistributionPoint IssuingDistributionPoint
+ IssuingDPFullNames GeneralNames
+ FreshestCRLDistributionPoint []string
+ OCSPServer []string
+ IssuingCertificateURL []string
+}
+
+// ParseCertificateList parses a CertificateList (e.g. a CRL) from the given
+// bytes. It's often the case that PEM encoded CRLs will appear where they
+// should be DER encoded, so this function will transparently handle PEM
+// encoding as long as there isn't any leading garbage.
+func ParseCertificateList(clBytes []byte) (*CertificateList, error) {
+ if bytes.HasPrefix(clBytes, pemCRLPrefix) {
+ block, _ := pem.Decode(clBytes)
+ if block != nil && block.Type == pemType {
+ clBytes = block.Bytes
+ }
+ }
+ return ParseCertificateListDER(clBytes)
+}
+
+// ParseCertificateListDER parses a DER encoded CertificateList from the given bytes.
+// For non-fatal errors, this function returns both an error and a CertificateList
+// object.
+func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
+ var errs Errors
+ // First parse the DER into the pkix structures.
+ pkixList := new(pkix.CertificateList)
+ if rest, err := asn1.Unmarshal(derBytes, pkixList); err != nil {
+ errs.AddID(ErrInvalidCertList, err)
+ return nil, &errs
+ } else if len(rest) != 0 {
+ errs.AddID(ErrTrailingCertList)
+ return nil, &errs
+ }
+
+ // Transcribe the revoked certs but crack out extensions.
+ revokedCerts := make([]*RevokedCertificate, len(pkixList.TBSCertList.RevokedCertificates))
+ for i, pkixRevoked := range pkixList.TBSCertList.RevokedCertificates {
+ revokedCerts[i] = parseRevokedCertificate(pkixRevoked, &errs)
+ if revokedCerts[i] == nil {
+ return nil, &errs
+ }
+ }
+
+ certList := CertificateList{
+ Raw: derBytes,
+ TBSCertList: TBSCertList{
+ Raw: pkixList.TBSCertList.Raw,
+ Version: pkixList.TBSCertList.Version,
+ Signature: pkixList.TBSCertList.Signature,
+ Issuer: pkixList.TBSCertList.Issuer,
+ ThisUpdate: pkixList.TBSCertList.ThisUpdate,
+ NextUpdate: pkixList.TBSCertList.NextUpdate,
+ RevokedCertificates: revokedCerts,
+ Extensions: pkixList.TBSCertList.Extensions,
+ CRLNumber: -1,
+ BaseCRLNumber: -1,
+ },
+ SignatureAlgorithm: pkixList.SignatureAlgorithm,
+ SignatureValue: pkixList.SignatureValue,
+ }
+
+ // Now crack out extensions.
+ for _, e := range certList.TBSCertList.Extensions {
+ if expectCritical, present := listExtCritical[e.Id.String()]; present {
+ if e.Critical && !expectCritical {
+ errs.AddID(ErrUnexpectedlyCriticalCertListExtension, e.Id)
+ } else if !e.Critical && expectCritical {
+ errs.AddID(ErrUnexpectedlyNonCriticalCertListExtension, e.Id)
+ }
+ }
+ switch {
+ case e.Id.Equal(OIDExtensionAuthorityKeyId):
+ // RFC 5280 s5.2.1
+ var a authKeyId
+ if rest, err := asn1.Unmarshal(e.Value, &a); err != nil {
+ errs.AddID(ErrInvalidCertListAuthKeyID, err)
+ } else if len(rest) != 0 {
+ errs.AddID(ErrTrailingCertListAuthKeyID)
+ }
+ certList.TBSCertList.AuthorityKeyID = a.Id
+ case e.Id.Equal(OIDExtensionIssuerAltName):
+ // RFC 5280 s5.2.2
+ if err := parseGeneralNames(e.Value, &certList.TBSCertList.IssuerAltNames); err != nil {
+ errs.AddID(ErrInvalidCertListIssuerAltName, err)
+ }
+ case e.Id.Equal(OIDExtensionCRLNumber):
+ // RFC 5280 s5.2.3
+ if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.CRLNumber); err != nil {
+ errs.AddID(ErrInvalidCertListCRLNumber, err)
+ } else if len(rest) != 0 {
+ errs.AddID(ErrTrailingCertListCRLNumber)
+ }
+ if certList.TBSCertList.CRLNumber < 0 {
+ errs.AddID(ErrNegativeCertListCRLNumber, certList.TBSCertList.CRLNumber)
+ }
+ case e.Id.Equal(OIDExtensionDeltaCRLIndicator):
+ // RFC 5280 s5.2.4
+ if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.BaseCRLNumber); err != nil {
+ errs.AddID(ErrInvalidCertListDeltaCRL, err)
+ } else if len(rest) != 0 {
+ errs.AddID(ErrTrailingCertListDeltaCRL)
+ }
+ if certList.TBSCertList.BaseCRLNumber < 0 {
+ errs.AddID(ErrNegativeCertListDeltaCRL, certList.TBSCertList.BaseCRLNumber)
+ }
+ case e.Id.Equal(OIDExtensionIssuingDistributionPoint):
+ parseIssuingDistributionPoint(e.Value, &certList.TBSCertList.IssuingDistributionPoint, &certList.TBSCertList.IssuingDPFullNames, &errs)
+ case e.Id.Equal(OIDExtensionFreshestCRL):
+ // RFC 5280 s5.2.6
+ if err := parseDistributionPoints(e.Value, &certList.TBSCertList.FreshestCRLDistributionPoint); err != nil {
+ errs.AddID(ErrInvalidCertListFreshestCRL, err)
+ return nil, err
+ }
+ case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
+ // RFC 5280 s5.2.7
+ var aia []accessDescription
+ if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
+ errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
+ } else if len(rest) != 0 {
+ errs.AddID(ErrTrailingCertListAuthInfoAccess)
+ }
+
+ for _, v := range aia {
+ // GeneralName: uniformResourceIdentifier [6] IA5String
+ if v.Location.Tag != tagURI {
+ continue
+ }
+ switch {
+ case v.Method.Equal(OIDAuthorityInfoAccessOCSP):
+ certList.TBSCertList.OCSPServer = append(certList.TBSCertList.OCSPServer, string(v.Location.Bytes))
+ case v.Method.Equal(OIDAuthorityInfoAccessIssuers):
+ certList.TBSCertList.IssuingCertificateURL = append(certList.TBSCertList.IssuingCertificateURL, string(v.Location.Bytes))
+ }
+ // TODO(drysdale): cope with more possibilities
+ }
+ default:
+ if e.Critical {
+ errs.AddID(ErrUnhandledCriticalCertListExtension, e.Id)
+ }
+ }
+ }
+
+ if errs.Fatal() {
+ return nil, &errs
+ }
+ if errs.Empty() {
+ return &certList, nil
+ }
+ return &certList, &errs
+}
+
+func parseIssuingDistributionPoint(data []byte, idp *IssuingDistributionPoint, name *GeneralNames, errs *Errors) {
+ // RFC 5280 s5.2.5
+ if rest, err := asn1.Unmarshal(data, idp); err != nil {
+ errs.AddID(ErrInvalidCertListIssuingDP, err)
+ } else if len(rest) != 0 {
+ errs.AddID(ErrTrailingCertListIssuingDP)
+ }
+
+ typeCount := 0
+ if idp.OnlyContainsUserCerts {
+ typeCount++
+ }
+ if idp.OnlyContainsCACerts {
+ typeCount++
+ }
+ if idp.OnlyContainsAttributeCerts {
+ typeCount++
+ }
+ if typeCount > 1 {
+ errs.AddID(ErrCertListIssuingDPMultipleTypes, idp.OnlyContainsUserCerts, idp.OnlyContainsCACerts, idp.OnlyContainsAttributeCerts)
+ }
+ for _, fn := range idp.DistributionPoint.FullName {
+ if _, err := parseGeneralName(fn.FullBytes, name, false); err != nil {
+ errs.AddID(ErrCertListIssuingDPInvalidFullName, err)
+ }
+ }
+}
+
+// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
+// revokedCertificates member of the TBSCertList structure from RFC 5280, s5.1.
+// It has the same content as pkix.RevokedCertificate but the extensions are
+// included in a parsed format.
+type RevokedCertificate struct {
+ pkix.RevokedCertificate
+ // Cracked out extensions:
+ RevocationReason RevocationReasonCode
+ InvalidityDate time.Time
+ Issuer GeneralNames
+}
+
+func parseRevokedCertificate(pkixRevoked pkix.RevokedCertificate, errs *Errors) *RevokedCertificate {
+ result := RevokedCertificate{RevokedCertificate: pkixRevoked}
+ for _, e := range pkixRevoked.Extensions {
+ if expectCritical, present := certExtCritical[e.Id.String()]; present {
+ if e.Critical && !expectCritical {
+ errs.AddID(ErrUnexpectedlyCriticalRevokedCertExtension, e.Id)
+ } else if !e.Critical && expectCritical {
+ errs.AddID(ErrUnexpectedlyNonCriticalRevokedCertExtension, e.Id)
+ }
+ }
+ switch {
+ case e.Id.Equal(OIDExtensionCRLReasons):
+ // RFC 5280, s5.3.1
+ var reason asn1.Enumerated
+ if rest, err := asn1.Unmarshal(e.Value, &reason); err != nil {
+ errs.AddID(ErrInvalidRevocationReason, err)
+ } else if len(rest) != 0 {
+ errs.AddID(ErrTrailingRevocationReason)
+ }
+ result.RevocationReason = RevocationReasonCode(reason)
+ case e.Id.Equal(OIDExtensionInvalidityDate):
+ // RFC 5280, s5.3.2
+ if rest, err := asn1.Unmarshal(e.Value, &result.InvalidityDate); err != nil {
+ errs.AddID(ErrInvalidRevocationInvalidityDate, err)
+ } else if len(rest) != 0 {
+ errs.AddID(ErrTrailingRevocationInvalidityDate)
+ }
+ case e.Id.Equal(OIDExtensionCertificateIssuer):
+ // RFC 5280, s5.3.3
+ if err := parseGeneralNames(e.Value, &result.Issuer); err != nil {
+ errs.AddID(ErrInvalidRevocationIssuer, err)
+ }
+ default:
+ if e.Critical {
+ errs.AddID(ErrUnhandledCriticalRevokedCertExtension, e.Id)
+ }
+ }
+ }
+ return &result
+}
+
+// CheckCertificateListSignature checks that the signature in crl is from c.
+func (c *Certificate) CheckCertificateListSignature(crl *CertificateList) error {
+ algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm)
+ return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
+}
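+
+// Editorial usage sketch (not part of the upstream file): parsing a CRL
+// (PEM or DER) and checking it against its issuing certificate, assuming
+// crlBytes and issuer are supplied by the caller:
+//
+//	crl, err := ParseCertificateList(crlBytes)
+//	if err != nil {
+//		// may still have a usable crl if the errors were non-fatal
+//	}
+//	if crl.ExpiredAt(time.Now()) {
+//		// CRL is stale
+//	}
+//	if err := issuer.CheckCertificateListSignature(crl); err != nil {
+//		// signature does not verify
+//	}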
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root.go b/vendor/github.com/google/certificate-transparency-go/x509/root.go
new file mode 100644
index 00000000000..240296247df
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root.go
@@ -0,0 +1,25 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import "sync"
+
+var (
+ once sync.Once
+ systemRoots *CertPool
+ systemRootsErr error
+)
+
+func systemRootsPool() *CertPool {
+ once.Do(initSystemRoots)
+ return systemRoots
+}
+
+func initSystemRoots() {
+ systemRoots, systemRootsErr = loadSystemRoots()
+ if systemRootsErr != nil {
+ systemRoots = nil
+ }
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_aix.go b/vendor/github.com/google/certificate-transparency-go/x509/root_aix.go
new file mode 100644
index 00000000000..6d427739a43
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_aix.go
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+ "/var/ssl/certs/ca-bundle.crt",
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go b/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go
new file mode 100644
index 00000000000..8c04bdcdfac
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || netbsd || openbsd
+// +build dragonfly freebsd netbsd openbsd
+
+package x509
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+ "/usr/local/etc/ssl/cert.pem", // FreeBSD
+ "/etc/ssl/cert.pem", // OpenBSD
+ "/usr/local/share/certs/ca-root-nss.crt", // DragonFly
+ "/etc/openssl/certs/ca-certificates.crt", // NetBSD
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go
new file mode 100644
index 00000000000..dba99bb8dc1
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go
@@ -0,0 +1,315 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo && !arm && !arm64 && !ios
+// +build cgo,!arm,!arm64,!ios
+
+package x509
+
+/*
+#cgo CFLAGS: -mmacosx-version-min=10.10 -D__MAC_OS_X_VERSION_MAX_ALLOWED=101300
+#cgo LDFLAGS: -framework CoreFoundation -framework Security
+
+#include <errno.h>
+#include <sys/sysctl.h>
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <Security/Security.h>
+
+static Boolean isSSLPolicy(SecPolicyRef policyRef) {
+ if (!policyRef) {
+ return false;
+ }
+ CFDictionaryRef properties = SecPolicyCopyProperties(policyRef);
+ if (properties == NULL) {
+ return false;
+ }
+ Boolean isSSL = false;
+ CFTypeRef value = NULL;
+ if (CFDictionaryGetValueIfPresent(properties, kSecPolicyOid, (const void **)&value)) {
+ isSSL = CFEqual(value, kSecPolicyAppleSSL);
+ }
+ CFRelease(properties);
+ return isSSL;
+}
+
+// sslTrustSettingsResult obtains the final kSecTrustSettingsResult value
+// for a certificate in the user or admin domain, combining usage constraints
+// for the SSL SecTrustSettingsPolicy, ignoring SecTrustSettingsKeyUsage and
+// kSecTrustSettingsAllowedError.
+// https://developer.apple.com/documentation/security/1400261-sectrustsettingscopytrustsetting
+static SInt32 sslTrustSettingsResult(SecCertificateRef cert) {
+ CFArrayRef trustSettings = NULL;
+ OSStatus err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainUser, &trustSettings);
+
+ // According to Apple's SecTrustServer.c, "user trust settings overrule admin trust settings",
+ // but the rules of the override are unclear. Let's assume admin trust settings are applicable
+ // if and only if user trust settings fail to load or are NULL.
+ if (err != errSecSuccess || trustSettings == NULL) {
+ if (trustSettings != NULL) CFRelease(trustSettings);
+ err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainAdmin, &trustSettings);
+ }
+
+ // > no trust settings [...] means "this certificate must be verified to a known trusted certificate"
+ // (Should this cause a fallback from user to admin domain? It's unclear.)
+ if (err != errSecSuccess || trustSettings == NULL) {
+ if (trustSettings != NULL) CFRelease(trustSettings);
+ return kSecTrustSettingsResultUnspecified;
+ }
+
+ // > An empty trust settings array means "always trust this certificate" with an
+ // > overall trust setting for the certificate of kSecTrustSettingsResultTrustRoot.
+ if (CFArrayGetCount(trustSettings) == 0) {
+ CFRelease(trustSettings);
+ return kSecTrustSettingsResultTrustRoot;
+ }
+
+ // kSecTrustSettingsResult is defined as CFSTR("kSecTrustSettingsResult"),
+ // but the Go linker's internal linking mode can't handle CFSTR relocations.
+ // Create our own dynamic string instead and release it below.
+ CFStringRef _kSecTrustSettingsResult = CFStringCreateWithCString(
+ NULL, "kSecTrustSettingsResult", kCFStringEncodingUTF8);
+ CFStringRef _kSecTrustSettingsPolicy = CFStringCreateWithCString(
+ NULL, "kSecTrustSettingsPolicy", kCFStringEncodingUTF8);
+ CFStringRef _kSecTrustSettingsPolicyString = CFStringCreateWithCString(
+ NULL, "kSecTrustSettingsPolicyString", kCFStringEncodingUTF8);
+
+ CFIndex m; SInt32 result = 0;
+ for (m = 0; m < CFArrayGetCount(trustSettings); m++) {
+ CFDictionaryRef tSetting = (CFDictionaryRef)CFArrayGetValueAtIndex(trustSettings, m);
+
+ // First, check if this trust setting is constrained to a non-SSL policy.
+ SecPolicyRef policyRef;
+ if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsPolicy, (const void**)&policyRef)) {
+ if (!isSSLPolicy(policyRef)) {
+ continue;
+ }
+ }
+
+ if (CFDictionaryContainsKey(tSetting, _kSecTrustSettingsPolicyString)) {
+ // Restricted to a hostname, not a root.
+ continue;
+ }
+
+ CFNumberRef cfNum;
+ if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsResult, (const void**)&cfNum)) {
+ CFNumberGetValue(cfNum, kCFNumberSInt32Type, &result);
+ } else {
+ // > If this key is not present, a default value of
+ // > kSecTrustSettingsResultTrustRoot is assumed.
+ result = kSecTrustSettingsResultTrustRoot;
+ }
+
+ // If multiple dictionaries match, we are supposed to "OR" them,
+ // the semantics of which are not clear. Since TrustRoot and TrustAsRoot
+ // are mutually exclusive, Deny should probably override, and Invalid and
+ // Unspecified be overridden, approximate this by stopping at the first
+ // TrustRoot, TrustAsRoot or Deny.
+ if (result == kSecTrustSettingsResultTrustRoot) {
+ break;
+ } else if (result == kSecTrustSettingsResultTrustAsRoot) {
+ break;
+ } else if (result == kSecTrustSettingsResultDeny) {
+ break;
+ }
+ }
+
+ // If trust settings are present, but none of them match the policy...
+ // the docs don't tell us what to do.
+ //
+ // "Trust settings for a given use apply if any of the dictionaries in the
+ // certificate’s trust settings array satisfies the specified use." suggests
+ // that it's as if there were no trust settings at all, so we should probably
+ // fallback to the admin trust settings. TODO.
+ if (result == 0) {
+ result = kSecTrustSettingsResultUnspecified;
+ }
+
+ CFRelease(_kSecTrustSettingsPolicy);
+ CFRelease(_kSecTrustSettingsPolicyString);
+ CFRelease(_kSecTrustSettingsResult);
+ CFRelease(trustSettings);
+
+ return result;
+}
+
+// isRootCertificate reports whether Subject and Issuer match.
+static Boolean isRootCertificate(SecCertificateRef cert, CFErrorRef *errRef) {
+ CFDataRef subjectName = SecCertificateCopyNormalizedSubjectContent(cert, errRef);
+ if (*errRef != NULL) {
+ return false;
+ }
+ CFDataRef issuerName = SecCertificateCopyNormalizedIssuerContent(cert, errRef);
+ if (*errRef != NULL) {
+ CFRelease(subjectName);
+ return false;
+ }
+ Boolean equal = CFEqual(subjectName, issuerName);
+ CFRelease(subjectName);
+ CFRelease(issuerName);
+ return equal;
+}
+
+// CopyPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates
+// for the kSecTrustSettingsPolicy SSL.
+//
+// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root
+// certificates of the system. On failure, the function returns -1.
+// Additionally, it fills untrustedPemRoots with certs that must be removed from pemRoots.
+//
+// Note: The CFDataRef returned in pemRoots and untrustedPemRoots must
+// be released (using CFRelease) after we've consumed its content.
+static int CopyPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots, bool debugDarwinRoots) {
+ int i;
+
+ if (debugDarwinRoots) {
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultInvalid = %d\n", kSecTrustSettingsResultInvalid);
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultTrustRoot = %d\n", kSecTrustSettingsResultTrustRoot);
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultTrustAsRoot = %d\n", kSecTrustSettingsResultTrustAsRoot);
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultDeny = %d\n", kSecTrustSettingsResultDeny);
+ fprintf(stderr, "crypto/x509: kSecTrustSettingsResultUnspecified = %d\n", kSecTrustSettingsResultUnspecified);
+ }
+
+ // Get certificates from all domains, not just System; this lets
+ // users add CAs to their "login" keychain, and admins add CAs
+ // to the "System" keychain.
+ SecTrustSettingsDomain domains[] = { kSecTrustSettingsDomainSystem,
+ kSecTrustSettingsDomainAdmin, kSecTrustSettingsDomainUser };
+
+ int numDomains = sizeof(domains)/sizeof(SecTrustSettingsDomain);
+ if (pemRoots == NULL || untrustedPemRoots == NULL) {
+ return -1;
+ }
+
+ CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
+ CFMutableDataRef combinedUntrustedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
+ for (i = 0; i < numDomains; i++) {
+ int j;
+ CFArrayRef certs = NULL;
+ OSStatus err = SecTrustSettingsCopyCertificates(domains[i], &certs);
+ if (err != noErr) {
+ continue;
+ }
+
+ CFIndex numCerts = CFArrayGetCount(certs);
+ for (j = 0; j < numCerts; j++) {
+ SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, j);
+ if (cert == NULL) {
+ continue;
+ }
+
+ SInt32 result;
+ if (domains[i] == kSecTrustSettingsDomainSystem) {
+ // Certs found in the system domain are always trusted. If the user
+ // configures "Never Trust" on such a cert, it will also be found in the
+ // admin or user domain, causing it to be added to untrustedPemRoots. The
+ // Go code will then clean this up.
+ result = kSecTrustSettingsResultTrustRoot;
+ } else {
+ result = sslTrustSettingsResult(cert);
+ if (debugDarwinRoots) {
+ CFErrorRef errRef = NULL;
+ CFStringRef summary = SecCertificateCopyShortDescription(NULL, cert, &errRef);
+ if (errRef != NULL) {
+ fprintf(stderr, "crypto/x509: SecCertificateCopyShortDescription failed\n");
+ CFRelease(errRef);
+ continue;
+ }
+
+ CFIndex length = CFStringGetLength(summary);
+ CFIndex maxSize = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
+ char *buffer = malloc(maxSize);
+ if (CFStringGetCString(summary, buffer, maxSize, kCFStringEncodingUTF8)) {
+ fprintf(stderr, "crypto/x509: %s returned %d\n", buffer, (int)result);
+ }
+ free(buffer);
+ CFRelease(summary);
+ }
+ }
+
+ CFMutableDataRef appendTo;
+ // > Note the distinction between the results kSecTrustSettingsResultTrustRoot
+ // > and kSecTrustSettingsResultTrustAsRoot: The former can only be applied to
+ // > root (self-signed) certificates; the latter can only be applied to
+ // > non-root certificates.
+ if (result == kSecTrustSettingsResultTrustRoot) {
+ CFErrorRef errRef = NULL;
+ if (!isRootCertificate(cert, &errRef) || errRef != NULL) {
+ if (errRef != NULL) CFRelease(errRef);
+ continue;
+ }
+
+ appendTo = combinedData;
+ } else if (result == kSecTrustSettingsResultTrustAsRoot) {
+ CFErrorRef errRef = NULL;
+ if (isRootCertificate(cert, &errRef) || errRef != NULL) {
+ if (errRef != NULL) CFRelease(errRef);
+ continue;
+ }
+
+ appendTo = combinedData;
+ } else if (result == kSecTrustSettingsResultDeny) {
+ appendTo = combinedUntrustedData;
+ } else if (result == kSecTrustSettingsResultUnspecified) {
+ // Certificates with unspecified trust should probably be added to a pool of
+ // intermediates for chain building, or checked for transitive trust and
+ // added to the root pool (which is an imprecise approximation because it
+ // cuts chains short) but we don't support either at the moment. TODO.
+ continue;
+ } else {
+ continue;
+ }
+
+ CFDataRef data = NULL;
+ err = SecItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
+ if (err != noErr) {
+ continue;
+ }
+ if (data != NULL) {
+ CFDataAppendBytes(appendTo, CFDataGetBytePtr(data), CFDataGetLength(data));
+ CFRelease(data);
+ }
+ }
+ CFRelease(certs);
+ }
+ *pemRoots = combinedData;
+ *untrustedPemRoots = combinedUntrustedData;
+ return 0;
+}
+*/
+import "C"
+import (
+ "errors"
+ "unsafe"
+)
+
+func loadSystemRoots() (*CertPool, error) {
+ var data, untrustedData C.CFDataRef
+ err := C.CopyPEMRootsCTX509(&data, &untrustedData, C.bool(debugDarwinRoots))
+ if err == -1 {
+ return nil, errors.New("crypto/x509: failed to load darwin system roots with cgo")
+ }
+ defer C.CFRelease(C.CFTypeRef(data))
+ defer C.CFRelease(C.CFTypeRef(untrustedData))
+
+ buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data)))
+ roots := NewCertPool()
+ roots.AppendCertsFromPEM(buf)
+
+ if C.CFDataGetLength(untrustedData) == 0 {
+ return roots, nil
+ }
+
+ buf = C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(untrustedData)), C.int(C.CFDataGetLength(untrustedData)))
+ untrustedRoots := NewCertPool()
+ untrustedRoots.AppendCertsFromPEM(buf)
+
+ trustedRoots := NewCertPool()
+ for _, c := range roots.certs {
+ if !untrustedRoots.contains(c) {
+ trustedRoots.AddCert(c)
+ }
+ }
+ return trustedRoots, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go
new file mode 100644
index 00000000000..4330ae97a44
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go
@@ -0,0 +1,288 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run root_darwin_arm_gen.go -output root_darwin_armx.go
+
+package x509
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/sha1"
+ "encoding/pem"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "os/user"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+var debugDarwinRoots = strings.Contains(os.Getenv("GODEBUG"), "x509roots=1")
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
+
+// This code is only used when compiling without cgo.
+// It is here, instead of root_nocgo_darwin.go, so that tests can check it
+// even if the tests are run with cgo enabled.
+// The linker will not include these unused functions in binaries built with cgo enabled.
+
+// execSecurityRoots finds the macOS list of trusted root certificates
+// using only command-line tools. This is our fallback path when cgo isn't available.
+//
+// The strategy is as follows:
+//
+// 1. Run "security trust-settings-export" and "security
+// trust-settings-export -d" to discover the set of certs with some
+// user-tweaked trust policy. We're too lazy to parse the XML
+// (Issue 26830) to understand what the trust
+// policy actually is. We just learn that there is _some_ policy.
+//
+// 2. Run "security find-certificate" to dump the list of system root
+// CAs in PEM format.
+//
+// 3. For each dumped cert, conditionally verify it with "security
+// verify-cert" if that cert was in the set discovered in Step 1.
+// Without the Step 1 optimization, running "security verify-cert"
+// 150-200 times takes 3.5 seconds. With the optimization, the
+// whole process takes about 180 milliseconds with 1 untrusted root
+// CA. (Compared to 110ms in the cgo path)
+func execSecurityRoots() (*CertPool, error) {
+ hasPolicy, err := getCertsWithTrustPolicy()
+ if err != nil {
+ return nil, err
+ }
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: %d certs have a trust policy\n", len(hasPolicy))
+ }
+
+ keychains := []string{"/Library/Keychains/System.keychain"}
+
+ // Note that this results in trusting roots from $HOME/... (the environment
+ // variable), which might not be expected.
+ u, err := user.Current()
+ if err != nil {
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: can't get user home directory: %v\n", err)
+ }
+ } else {
+ keychains = append(keychains,
+ filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain"),
+
+ // Fresh installs of Sierra use a slightly different path for the login keychain
+ filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain-db"),
+ )
+ }
+
+ type rootCandidate struct {
+ c *Certificate
+ system bool
+ }
+
+ var (
+ mu sync.Mutex
+ roots = NewCertPool()
+ numVerified int // number of execs of 'security verify-cert', for debug stats
+ wg sync.WaitGroup
+ verifyCh = make(chan rootCandidate)
+ )
+
+	// Using 4 goroutines to pipe into verify-cert seems to be
+	// about the best we can do. The verify-cert binary seems to
+	// just RPC to another server with coarse locking anyway, so
+	// running 16 at a time for instance doesn't help at all. Due
+	// to the "if hasPolicy" check below, we will rarely (or never)
+	// call verify-cert on stock macOS systems. The hope is that we
+	// only call verify-cert when the user has tweaked their trust
+	// policy. These 4 goroutines are only defensive in the
+	// pathological case of many trust edits.
+ for i := 0; i < 4; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for cert := range verifyCh {
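+				// sha1CapHex is the cert's uppercase SHA-1 fingerprint, the
+				// same format collected by getCertsWithTrustPolicy below.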
+ sha1CapHex := fmt.Sprintf("%X", sha1.Sum(cert.c.Raw))
+
+ var valid bool
+ verifyChecks := 0
+ if hasPolicy[sha1CapHex] {
+ verifyChecks++
+ valid = verifyCertWithSystem(cert.c)
+ } else {
+ // Certificates not in SystemRootCertificates without user
+ // or admin trust settings are not trusted.
+ valid = cert.system
+ }
+
+ mu.Lock()
+ numVerified += verifyChecks
+ if valid {
+ roots.AddCert(cert.c)
+ }
+ mu.Unlock()
+ }
+ }()
+ }
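+	// Feed the verify workers: certs from the user and login keychains
+	// (system: false) are only trusted if verify-cert approves them, while
+	// certs from SystemRootCertificates (system: true) are trusted unless a
+	// user-tweaked trust policy says otherwise.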
+ err = forEachCertInKeychains(keychains, func(cert *Certificate) {
+ verifyCh <- rootCandidate{c: cert, system: false}
+ })
+ if err != nil {
+ close(verifyCh)
+ return nil, err
+ }
+ err = forEachCertInKeychains([]string{
+ "/System/Library/Keychains/SystemRootCertificates.keychain",
+ }, func(cert *Certificate) {
+ verifyCh <- rootCandidate{c: cert, system: true}
+ })
+ if err != nil {
+ close(verifyCh)
+ return nil, err
+ }
+ close(verifyCh)
+ wg.Wait()
+
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: ran security verify-cert %d times\n", numVerified)
+ }
+
+ return roots, nil
+}
+
+func forEachCertInKeychains(paths []string, f func(*Certificate)) error {
+ args := append([]string{"find-certificate", "-a", "-p"}, paths...)
+ cmd := exec.Command("/usr/bin/security", args...)
+ data, err := cmd.Output()
+ if err != nil {
+ return err
+ }
+ for len(data) > 0 {
+ var block *pem.Block
+ block, data = pem.Decode(data)
+ if block == nil {
+ break
+ }
+ if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+ continue
+ }
+ cert, err := ParseCertificate(block.Bytes)
+ if err != nil {
+ continue
+ }
+ f(cert)
+ }
+ return nil
+}
+
+func verifyCertWithSystem(cert *Certificate) bool {
+ data := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE", Bytes: cert.Raw,
+ })
+
+ f, err := os.CreateTemp("", "cert")
+ if err != nil {
+		fmt.Fprintf(os.Stderr, "can't create temporary file for cert: %v\n", err)
+ return false
+ }
+ defer os.Remove(f.Name())
+ if _, err := f.Write(data); err != nil {
+		fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v\n", err)
+ return false
+ }
+ if err := f.Close(); err != nil {
+		fmt.Fprintf(os.Stderr, "can't close temporary file for cert: %v\n", err)
+ return false
+ }
+ cmd := exec.Command("/usr/bin/security", "verify-cert", "-p", "ssl", "-c", f.Name(), "-l", "-L")
+ var stderr bytes.Buffer
+ if debugDarwinRoots {
+ cmd.Stderr = &stderr
+ }
+ if err := cmd.Run(); err != nil {
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: verify-cert rejected %s: %q\n", cert.Subject, bytes.TrimSpace(stderr.Bytes()))
+ }
+ return false
+ }
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: verify-cert approved %s\n", cert.Subject)
+ }
+ return true
+}
+
+// getCertsWithTrustPolicy returns the set of certs that have a
+// possibly-altered trust policy. The keys of the map are the
+// uppercase SHA-1 hex digests of the raw certs.
+// These are the certs that should be checked against `security
+// verify-cert` to see whether the user altered the default trust
+// settings. This code is only used for cgo-disabled builds.
+func getCertsWithTrustPolicy() (map[string]bool, error) {
+ set := map[string]bool{}
+ td, err := os.MkdirTemp("", "x509trustpolicy")
+ if err != nil {
+ return nil, err
+ }
+ defer os.RemoveAll(td)
+ run := func(file string, args ...string) error {
+ file = filepath.Join(td, file)
+ args = append(args, file)
+ cmd := exec.Command("/usr/bin/security", args...)
+ var stderr bytes.Buffer
+ cmd.Stderr = &stderr
+ if err := cmd.Run(); err != nil {
+ // If there are no trust settings, the
+ // `security trust-settings-export` command
+ // fails with:
+ // exit status 1, SecTrustSettingsCreateExternalRepresentation: No Trust Settings were found.
+ // Rather than match on English substrings that are probably
+ // localized on macOS, just interpret any failure to mean that
+ // there are no trust settings.
+ if debugDarwinRoots {
+ fmt.Fprintf(os.Stderr, "crypto/x509: exec %q: %v, %s\n", cmd.Args, err, stderr.Bytes())
+ }
+ return nil
+ }
+
+ f, err := os.Open(file)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Gather all the runs of 40 capitalized hex characters.
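+		// (A SHA-1 digest is 20 bytes, i.e. exactly 40 hex digits.)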
+ br := bufio.NewReader(f)
+ var hexBuf bytes.Buffer
+ for {
+ b, err := br.ReadByte()
+ isHex := ('A' <= b && b <= 'F') || ('0' <= b && b <= '9')
+ if isHex {
+ hexBuf.WriteByte(b)
+ } else {
+ if hexBuf.Len() == 40 {
+ set[hexBuf.String()] = true
+ }
+ hexBuf.Reset()
+ }
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+ if err := run("user", "trust-settings-export"); err != nil {
+ return nil, fmt.Errorf("dump-trust-settings (user): %v", err)
+ }
+ if err := run("admin", "trust-settings-export", "-d"); err != nil {
+ return nil, fmt.Errorf("dump-trust-settings (admin): %v", err)
+ }
+ return set, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go
new file mode 100644
index 00000000000..5c93349b0bc
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go
@@ -0,0 +1,4314 @@
+// Code generated by root_darwin_arm_gen --output root_darwin_armx.go; DO NOT EDIT.
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo && darwin && (arm || arm64 || ios)
+// +build cgo
+// +build darwin
+// +build arm arm64 ios
+
+package x509
+
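+// loadSystemRoots on arm/iOS-class builds cannot query the system trust
+// store the way the macOS paths above do, so it parses the snapshot of root
+// certificates that the generator baked into this file.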
+func loadSystemRoots() (*CertPool, error) {
+ p := NewCertPool()
+ p.AppendCertsFromPEM([]byte(systemRootsPEM))
+ return p, nil
+}
+
+const systemRootsPEM = `
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
+ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
+e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
+WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
+pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
+X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
+fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
+K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
+ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
+LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
+LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw
+MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML
+QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD
+VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul
+CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n
+tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl
+dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch
+PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC
++Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O
+BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl
+MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk
+ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB
+IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X
+7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz
+43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY
+eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl
+pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA
+WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
+IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
+MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
+FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
+bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
+dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
+H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
+uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
+mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
+a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
+E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
+WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
+VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
+Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
+cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
+IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
+AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
+YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
+6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
+Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
+c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
+mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFVTCCBD2gAwIBAgIEO/OB0DANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQGEwJj
+aDEOMAwGA1UEChMFYWRtaW4xETAPBgNVBAsTCFNlcnZpY2VzMSIwIAYDVQQLExlD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRYwFAYDVQQDEw1BZG1pbi1Sb290LUNB
+MB4XDTAxMTExNTA4NTEwN1oXDTIxMTExMDA3NTEwN1owbDELMAkGA1UEBhMCY2gx
+DjAMBgNVBAoTBWFkbWluMREwDwYDVQQLEwhTZXJ2aWNlczEiMCAGA1UECxMZQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdGllczEWMBQGA1UEAxMNQWRtaW4tUm9vdC1DQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMvgr0QUIv5qF0nyXZ3PXAJi
+C4C5Wr+oVTN7oxIkXkxvO0GJToM9n7OVJjSmzBL0zJ2HXj0MDRcvhSY+KiZZc6Go
+vDvr5Ua481l7ILFeQAFtumeza+vvxeL5Nd0Maga2miiacLNAKXbAcUYRa0Ov5VZB
+++YcOYNNt/aisWbJqA2y8He+NsEgJzK5zNdayvYXQTZN+7tVgWOck16Da3+4FXdy
+fH1NCWtZlebtMKtERtkVAaVbiWW24CjZKAiVfggjsiLo3yVMPGj3budLx5D9hEEm
+vlyDOtcjebca+AcZglppWMX/iHIrx7740y0zd6cWEqiLIcZCrnpkr/KzwO135GkC
+AwEAAaOCAf0wggH5MA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIASBkTCBjjCBiwYI
+YIV0AREDAQAwfzArBggrBgEFBQcCAjAfGh1UaGlzIGlzIHRoZSBBZG1pbi1Sb290
+LUNBIENQUzBQBggrBgEFBQcCARZEaHR0cDovL3d3dy5pbmZvcm1hdGlrLmFkbWlu
+LmNoL1BLSS9saW5rcy9DUFNfMl8xNl83NTZfMV8xN18zXzFfMC5wZGYwfwYDVR0f
+BHgwdjB0oHKgcKRuMGwxFjAUBgNVBAMTDUFkbWluLVJvb3QtQ0ExIjAgBgNVBAsT
+GUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxETAPBgNVBAsTCFNlcnZpY2VzMQ4w
+DAYDVQQKEwVhZG1pbjELMAkGA1UEBhMCY2gwHQYDVR0OBBYEFIKf+iNzIPGXi7JM
+Tb5CxX9mzWToMIGZBgNVHSMEgZEwgY6AFIKf+iNzIPGXi7JMTb5CxX9mzWTooXCk
+bjBsMQswCQYDVQQGEwJjaDEOMAwGA1UEChMFYWRtaW4xETAPBgNVBAsTCFNlcnZp
+Y2VzMSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRYwFAYDVQQD
+Ew1BZG1pbi1Sb290LUNBggQ784HQMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0B
+AQUFAAOCAQEAeE96XCYRpy6umkPKXDWCRn7INo96ZrWpMggcDORuofHIwdTkgOeM
+vWOxDN/yuT7CC3FAaUajbPRbDw0hRMcqKz0aC8CgwcyIyhw/rFK29mfNTG3EviP9
+QSsEbnelFnjpm1wjz4EaBiFjatwpUbI6+Zv3XbEt9QQXBn+c6DeFLe4xvC4B+MTr
+a440xTk59pSYux8OHhEvqIwHCkiijGqZhTS3KmGFeBopaR+dJVBRBMoXwzk4B3Hn
+0Zib1dEYFZa84vPJZyvxCbLOnPRDJgH6V2uQqbG+6DXVaf/wORVOvF/wzzv0viM/
+RWbEtJZdvo8N3sdtCULzifnxP/V0T9+4ZQ==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIIGDCCBgCgAwIBAgIGAT8vMXfmMA0GCSqGSIb3DQEBCwUAMIIBCjELMAkGA1UE
+BhMCRVMxEjAQBgNVBAgMCUJhcmNlbG9uYTFYMFYGA1UEBwxPQmFyY2Vsb25hIChz
+ZWUgY3VycmVudCBhZGRyZXNzIGF0IGh0dHA6Ly93d3cuYW5mLmVzL2VzL2FkZHJl
+c3MtZGlyZWNjaW9uLmh0bWwgKTEnMCUGA1UECgweQU5GIEF1dG9yaWRhZCBkZSBD
+ZXJ0aWZpY2FjaW9uMRcwFQYDVQQLDA5BTkYgQ2xhc2UgMSBDQTEaMBgGCSqGSIb3
+DQEJARYLaW5mb0BhbmYuZXMxEjAQBgNVBAUTCUc2MzI4NzUxMDEbMBkGA1UEAwwS
+QU5GIEdsb2JhbCBSb290IENBMB4XDTEzMDYxMDE3NDUzOFoXDTMzMDYwNTE3NDUz
+OFowggEKMQswCQYDVQQGEwJFUzESMBAGA1UECAwJQmFyY2Vsb25hMVgwVgYDVQQH
+DE9CYXJjZWxvbmEgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgaHR0cDovL3d3dy5h
+bmYuZXMvZXMvYWRkcmVzcy1kaXJlY2Npb24uaHRtbCApMScwJQYDVQQKDB5BTkYg
+QXV0b3JpZGFkIGRlIENlcnRpZmljYWNpb24xFzAVBgNVBAsMDkFORiBDbGFzZSAx
+IENBMRowGAYJKoZIhvcNAQkBFgtpbmZvQGFuZi5lczESMBAGA1UEBRMJRzYzMjg3
+NTEwMRswGQYDVQQDDBJBTkYgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQDHPi9xy4wynbcUbWjorVUgQKeUAVh937J7P37XmsfH
+ZLOBZKIIlhhCtRwnDlg7x+BUvtJOTkIbEGMujDygUQ2s3HDYr5I41hTyM2Pl0cq2
+EuSGEbPIHb3dEX8NAguFexM0jqNjrreN3hM2/+TOkAxSdDJP2aMurlySC5zwl47K
+ZLHtcVrkZnkDa0o5iN24hJT4vBDT4t2q9khQ+qb1D8KgCOb02r1PxWXu3vfd6Ha2
+mkdB97iGuEh5gO2n4yOmFS5goFlVA2UdPbbhJsb8oKVKDd+YdCKGQDCkQyG4AjmC
+YiNm3UPG/qtftTH5cWri67DlLtm6fyUFOMmO6NSh0RtR745pL8GyWJUanyq/Q4bF
+HQB21E+WtTsCaqjGaoFcrBunMypmCd+jUZXl27TYENRFbrwNdAh7m2UztcIyb+Sg
+VJFyfvVsBQNvnp7GPimVxXZNc4VpxEXObRuPWQN1oZN/90PcZVqTia/SHzEyTryL
+ckhiLG3jZiaFZ7pTZ5I9wti9Pn+4kOHvE3Y/4nEnUo4mTxPX9pOlinF+VCiybtV2
+u1KSlc+YaIM7VmuyndDZCJRXm3v0/qTE7t5A5fArZl9lvibigMbWB8fpD+c1GpGH
+Eo8NRY0lkaM+DkIqQoaziIsz3IKJrfdKaq9bQMSlIfameKBZ8fNYTBZrH9KZAIhz
+YwIDAQABo4IBfjCCAXowHQYDVR0OBBYEFIf6nt9SdnXsSUogb1twlo+d77sXMB8G
+A1UdIwQYMBaAFIf6nt9SdnXsSUogb1twlo+d77sXMA8GA1UdEwEB/wQFMAMBAf8w
+DgYDVR0PAQH/BAQDAgEGMIIBFQYDVR0RBIIBDDCCAQiCEWh0dHA6Ly93d3cuYW5m
+LmVzgQtpbmZvQGFuZi5lc6SB5TCB4jE0MDIGA1UECQwrR3JhbiBWaWEgZGUgbGVz
+IENvcnRzIENhdGFsYW5lcy4gOTk2LiAwODAxODESMBAGA1UEBwwJQmFyY2Vsb25h
+MScwJQYDVQQKDB5BTkYgQXV0b3JpZGFkIGRlIENlcnRpZmljYWNpb24xEjAQBgNV
+BAUTCUc2MzI4NzUxMDFZMFcGA1UECwxQSW5zY3JpdGEgZW4gZWwgTWluaXN0ZXJp
+byBkZWwgSW50ZXJpb3IgZGUgRXNwYcOxYSBjb24gZWwgbnVtZXJvIG5hY2lvbmFs
+IDE3MS40NDMwDQYJKoZIhvcNAQELBQADggIBAIgR9tFTZ9BCYg+HViMxOfF0MHN2
+Pe/eC128ARdS+GH8A4thtbqiH/SOYbWofO/0zssHhNKa5iQEj45lCAb8BANpWJMD
+nWkPr6jq2+50a6d0MMgSS2l1rvjSF+3nIrEuicshHXSTi3q/vBLKr7uGKMVFaM68
+XAropIwk6ndlA0JseARSPsbetv7ALESMIZAxlHV1TcctYHd0bB3c/Jz+PLszJQqs
+Cg/kBPo2D111OXZkIY8W/fJuG9veR783khAK2gUnC0zLLCNsYzEbdGt8zUmBsAsM
+cGxqGm6B6vDXd65OxWqw13xdq/24+5R8Ng1PF9tvfjZkUFBF30CxjWur7P90WiKI
+G7IGfr6BE1NgXlhEQQu4F+HizB1ypEPzGWltecXQ4yOzO+H0WfFTjLTYX6VSveyW
+DQV18ixF8M4tHP/SwNE+yyv2b2JJ3/3RpxjtFlLk+opJ574x0gD/dMJuWTH0JqVY
+3PbRfE1jIxFpk164Qz/Xp7H7w7f6xh+tQCkBs3PUYmnGIZcPwq44Q6JHlCNsKx4K
+hxfggTvRCk4w79cUID45c2qDsRCqTPoOo/cbOpcfVhbH9LdMORpmuLwNogRZEUSE
+fWpqR9q+0kcQf4zGSWIURIyDrogdpDgoHDxktqgMgc+qA4ZE2WQl1D8hmev53A46
+lUSrWUiWfDXtK3ux
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFkjCCA3qgAwIBAgIIAeDltYNno+AwDQYJKoZIhvcNAQEMBQAwZzEbMBkGA1UE
+AwwSQXBwbGUgUm9vdCBDQSAtIEcyMSYwJAYDVQQLDB1BcHBsZSBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eTETMBEGA1UECgwKQXBwbGUgSW5jLjELMAkGA1UEBhMCVVMw
+HhcNMTQwNDMwMTgxMDA5WhcNMzkwNDMwMTgxMDA5WjBnMRswGQYDVQQDDBJBcHBs
+ZSBSb290IENBIC0gRzIxJjAkBgNVBAsMHUFwcGxlIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MRMwEQYDVQQKDApBcHBsZSBJbmMuMQswCQYDVQQGEwJVUzCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBANgREkhI2imKScUcx+xuM23+TfvgHN6s
+XuI2pyT5f1BrTM65MFQn5bPW7SXmMLYFN14UIhHF6Kob0vuy0gmVOKTvKkmMXT5x
+ZgM4+xb1hYjkWpIMBDLyyED7Ul+f9sDx47pFoFDVEovy3d6RhiPw9bZyLgHaC/Yu
+OQhfGaFjQQscp5TBhsRTL3b2CtcM0YM/GlMZ81fVJ3/8E7j4ko380yhDPLVoACVd
+J2LT3VXdRCCQgzWTxb+4Gftr49wIQuavbfqeQMpOhYV4SbHXw8EwOTKrfl+q04tv
+ny0aIWhwZ7Oj8ZhBbZF8+NfbqOdfIRqMM78xdLe40fTgIvS/cjTf94FNcX1RoeKz
+8NMoFnNvzcytN31O661A4T+B/fc9Cj6i8b0xlilZ3MIZgIxbdMYs0xBTJh0UT8TU
+gWY8h2czJxQI6bR3hDRSj4n4aJgXv8O7qhOTH11UL6jHfPsNFL4VPSQ08prcdUFm
+IrQB1guvkJ4M6mL4m1k8COKWNORj3rw31OsMiANDC1CvoDTdUE0V+1ok2Az6DGOe
+HwOx4e7hqkP0ZmUoNwIx7wHHHtHMn23KVDpA287PT0aLSmWaasZobNfMmRtHsHLD
+d4/E92GcdB/O/WuhwpyUgquUoue9G7q5cDmVF8Up8zlYNPXEpMZ7YLlmQ1A/bmH8
+DvmGqmAMQ0uVAgMBAAGjQjBAMB0GA1UdDgQWBBTEmRNsGAPCe8CjoA1/coB6HHcm
+jTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQwF
+AAOCAgEAUabz4vS4PZO/Lc4Pu1vhVRROTtHlznldgX/+tvCHM/jvlOV+3Gp5pxy+
+8JS3ptEwnMgNCnWefZKVfhidfsJxaXwU6s+DDuQUQp50DhDNqxq6EWGBeNjxtUVA
+eKuowM77fWM3aPbn+6/Gw0vsHzYmE1SGlHKy6gLti23kDKaQwFd1z4xCfVzmMX3z
+ybKSaUYOiPjjLUKyOKimGY3xn83uamW8GrAlvacp/fQ+onVJv57byfenHmOZ4VxG
+/5IFjPoeIPmGlFYl5bRXOJ3riGQUIUkhOb9iZqmxospvPyFgxYnURTbImHy99v6Z
+SYA7LNKmp4gDBDEZt7Y6YUX6yfIjyGNzv1aJMbDZfGKnexWoiIqrOEDCzBL/FePw
+N983csvMmOa/orz6JopxVtfnJBtIRD6e/J/JzBrsQzwBvDR4yGn1xuZW7AYJNpDr
+FEobXsmII9oDMJELuDY++ee1KG++P+w8j2Ud5cAeh6Squpj9kuNsJnfdBrRkBof0
+Tta6SqoWqPQFZ2aWuuJVecMsXUmPgEkrihLHdoBR37q9ZV0+N0djMenl9MU/S60E
+inpxLK8JQzcPqOMyT/RFtm2XNuyE9QoB6he7hY1Ck3DDUOUUi78/w0EP3SIEIwiK
+um1xRKtzCTrJ+VKACd+66eYWyi4uTLLT3OUEVLLUNIAytbwPF+E=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICQzCCAcmgAwIBAgIILcX8iNLFS5UwCgYIKoZIzj0EAwMwZzEbMBkGA1UEAwwS
+QXBwbGUgUm9vdCBDQSAtIEczMSYwJAYDVQQLDB1BcHBsZSBDZXJ0aWZpY2F0aW9u
+IEF1dGhvcml0eTETMBEGA1UECgwKQXBwbGUgSW5jLjELMAkGA1UEBhMCVVMwHhcN
+MTQwNDMwMTgxOTA2WhcNMzkwNDMwMTgxOTA2WjBnMRswGQYDVQQDDBJBcHBsZSBS
+b290IENBIC0gRzMxJjAkBgNVBAsMHUFwcGxlIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRMwEQYDVQQKDApBcHBsZSBJbmMuMQswCQYDVQQGEwJVUzB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABJjpLz1AcqTtkyJygRMc3RCV8cWjTnHcFBbZDuWmBSp3ZHtf
+TjjTuxxEtX/1H7YyYl3J6YRbTzBPEVoA/VhYDKX1DyxNB0cTddqXl5dvMVztK517
+IDvYuVTZXpmkOlEKMaNCMEAwHQYDVR0OBBYEFLuw3qFYM4iapIqZ3r6966/ayySr
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2gA
+MGUCMQCD6cHEFl4aXTQY2e3v9GwOAEZLuN+yRhHFD/3meoyhpmvOwgPUnPWTxnS4
+at+qIxUCMG1mihDK1A3UT82NQz60imOlM27jbdoXt2QfyFMm+YhidDkLF1vLUagM
+6BgD56KyKA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEuzCCA6OgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQGEwJVUzET
+MBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwHhcNMDYwNDI1MjE0
+MDM2WhcNMzUwMjA5MjE0MDM2WjBiMQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBw
+bGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkx
+FjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDkkakJH5HbHkdQ6wXtXnmELes2oldMVeyLGYne+Uts9QerIjAC6Bg+
++FAJ039BqJj50cpmnCRrEdCju+QbKsMflZ56DKRHi1vUFjczy8QPTc4UadHJGXL1
+XQ7Vf1+b8iUDulWPTV0N8WQ1IxVLFVkds5T39pyez1C6wVhQZ48ItCD3y6wsIG9w
+tj8BMIy3Q88PnT3zK0koGsj+zrW5DtleHNbLPbU6rfQPDgCSC7EhFi501TwN22IW
+q6NxkkdTVcGvL0Gz+PvjcM3mo0xFfh9Ma1CWQYnEdGILEINBhzOKgbEwWOxaBDKM
+aLOPHd5lc/9nXmW8Sdh2nzMUZaF3lMktAgMBAAGjggF6MIIBdjAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUK9BpR5R2Cf70a40uQKb3
+R01/CF4wHwYDVR0jBBgwFoAUK9BpR5R2Cf70a40uQKb3R01/CF4wggERBgNVHSAE
+ggEIMIIBBDCCAQAGCSqGSIb3Y2QFATCB8jAqBggrBgEFBQcCARYeaHR0cHM6Ly93
+d3cuYXBwbGUuY29tL2FwcGxlY2EvMIHDBggrBgEFBQcCAjCBthqBs1JlbGlhbmNl
+IG9uIHRoaXMgY2VydGlmaWNhdGUgYnkgYW55IHBhcnR5IGFzc3VtZXMgYWNjZXB0
+YW5jZSBvZiB0aGUgdGhlbiBhcHBsaWNhYmxlIHN0YW5kYXJkIHRlcm1zIGFuZCBj
+b25kaXRpb25zIG9mIHVzZSwgY2VydGlmaWNhdGUgcG9saWN5IGFuZCBjZXJ0aWZp
+Y2F0aW9uIHByYWN0aWNlIHN0YXRlbWVudHMuMA0GCSqGSIb3DQEBBQUAA4IBAQBc
+NplMLXi37Yyb3PN3m/J20ncwT8EfhYOFG5k9RzfyqZtAjizUsZAS2L70c5vu0mQP
+y3lPNNiiPvl4/2vIB+x9OYOLUyDTOMSxv5pPCmv/K/xZpwUJfBdAVhEedNO3iyM7
+R6PVbyTi69G3cN8PReEnyvFteO3ntRcXqNx+IjXKJdXZD9Zr1KIkIxH3oayPc4Fg
+xhtbCS+SsvhESPBgOJ4V9T0mZyCKM2r3DYLP3uujL/lTaltkwGMzd/c6ByxW69oP
+IQ7aunMZT7XZNn/Bh1XZp5m5MkL72NVxnn6hUrcbvZNCJBIqxw8dtk2cXmPIS4AX
+UKqK1drk/NAJBzewdXUh
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFujCCBKKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhjELMAkGA1UEBhMCVVMx
+HTAbBgNVBAoTFEFwcGxlIENvbXB1dGVyLCBJbmMuMS0wKwYDVQQLEyRBcHBsZSBD
+b21wdXRlciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxKTAnBgNVBAMTIEFwcGxlIFJv
+b3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTA1MDIxMDAwMTgxNFoXDTI1MDIx
+MDAwMTgxNFowgYYxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRBcHBsZSBDb21wdXRl
+ciwgSW5jLjEtMCsGA1UECxMkQXBwbGUgQ29tcHV0ZXIgQ2VydGlmaWNhdGUgQXV0
+aG9yaXR5MSkwJwYDVQQDEyBBcHBsZSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0
+eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOSRqQkfkdseR1DrBe1e
+eYQt6zaiV0xV7IsZid75S2z1B6siMALoGD74UAnTf0GomPnRymacJGsR0KO75Bsq
+wx+VnnoMpEeLW9QWNzPLxA9NzhRp0ckZcvVdDtV/X5vyJQO6VY9NXQ3xZDUjFUsV
+WR2zlPf2nJ7PULrBWFBnjwi0IPfLrCwgb3C2PwEwjLdDzw+dPfMrSSgayP7OtbkO
+2V4c1ss9tTqt9A8OAJILsSEWLnTVPA3bYharo3GSR1NVwa8vQbP4++NwzeajTEV+
+H0xrUJZBicR0YgsQg0GHM4qBsTBY7FoEMoxos48d3mVz/2deZbxJ2HafMxRloXeU
+yS0CAwEAAaOCAi8wggIrMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
+MB0GA1UdDgQWBBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjAfBgNVHSMEGDAWgBQr0GlH
+lHYJ/vRrjS5ApvdHTX8IXjCCASkGA1UdIASCASAwggEcMIIBGAYJKoZIhvdjZAUB
+MIIBCTBBBggrBgEFBQcCARY1aHR0cHM6Ly93d3cuYXBwbGUuY29tL2NlcnRpZmlj
+YXRlYXV0aG9yaXR5L3Rlcm1zLmh0bWwwgcMGCCsGAQUFBwICMIG2GoGzUmVsaWFu
+Y2Ugb24gdGhpcyBjZXJ0aWZpY2F0ZSBieSBhbnkgcGFydHkgYXNzdW1lcyBhY2Nl
+cHRhbmNlIG9mIHRoZSB0aGVuIGFwcGxpY2FibGUgc3RhbmRhcmQgdGVybXMgYW5k
+IGNvbmRpdGlvbnMgb2YgdXNlLCBjZXJ0aWZpY2F0ZSBwb2xpY3kgYW5kIGNlcnRp
+ZmljYXRpb24gcHJhY3RpY2Ugc3RhdGVtZW50cy4wRAYDVR0fBD0wOzA5oDegNYYz
+aHR0cHM6Ly93d3cuYXBwbGUuY29tL2NlcnRpZmljYXRlYXV0aG9yaXR5L3Jvb3Qu
+Y3JsMFUGCCsGAQUFBwEBBEkwRzBFBggrBgEFBQcwAoY5aHR0cHM6Ly93d3cuYXBw
+bGUuY29tL2NlcnRpZmljYXRlYXV0aG9yaXR5L2Nhc2lnbmVycy5odG1sMA0GCSqG
+SIb3DQEBBQUAA4IBAQCd2i0oWC99dgS5BNM+zrdmY06PL9T+S61yvaM5xlJNBZhS
+9YlRASR5vhoy9+VEi0tEBzmC1lrKtCBe2a4VXR2MHTK/ODFiSF3H4ZCx+CRA+F9Y
+m1FdV53B5f88zHIhbsTp6aF31ywXJsM/65roCwO66bNKcuszCVut5mIxauivL9Wv
+Hld2j383LS4CXN1jyfJxuCZA3xWNdUQ/eb3mHZnhQyw+rW++uaT+DjUZUWOxw961
+kj5ReAFziqQjyqSI8R5cH0EWLX6VCqrpiUGYGxrdyyC/R14MJsVVNU3GMIuZZxTH
+CR+6R8faAQmHJEKVvRNgGQrv6n8Obs3BREM6StXj
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIID9zCCAt+gAwIBAgILMTI1MzcyODI4MjgwDQYJKoZIhvcNAQELBQAwWDELMAkG
+A1UEBhMCSlAxHDAaBgNVBAoTE0phcGFuZXNlIEdvdmVybm1lbnQxDTALBgNVBAsT
+BEdQS0kxHDAaBgNVBAMTE0FwcGxpY2F0aW9uQ0EyIFJvb3QwHhcNMTMwMzEyMTUw
+MDAwWhcNMzMwMzEyMTUwMDAwWjBYMQswCQYDVQQGEwJKUDEcMBoGA1UEChMTSmFw
+YW5lc2UgR292ZXJubWVudDENMAsGA1UECxMER1BLSTEcMBoGA1UEAxMTQXBwbGlj
+YXRpb25DQTIgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKaq
+rSVl1gAR1uh6dqr05rRL88zDUrSNrKZPtZJxb0a11a2LEiIXJc5F6BR6hZrkIxCo
++rFnUOVtR+BqiRPjrq418fRCxQX3TZd+PCj8sCaRHoweOBqW3FhEl2LjMsjRFUFN
+dZh4vqtoqV7tR76kuo6hApfek3SZbWe0BSXulMjtqqS6MmxCEeu+yxcGkOGThchk
+KM4fR8fAXWDudjbcMztR63vPctgPeKgZggiQPhqYjY60zxU2pm7dt+JNQCBT2XYq
+0HisifBPizJtROouurCp64ndt295D6uBbrjmiykLWa+2SQ1RLKn9nShjZrhwlXOa
+2Po7M7xCQhsyrLEy+z0CAwEAAaOBwTCBvjAdBgNVHQ4EFgQUVqesqgIdsqw9kA6g
+by5Bxnbne9owDgYDVR0PAQH/BAQDAgEGMHwGA1UdEQR1MHOkcTBvMQswCQYDVQQG
+EwJKUDEYMBYGA1UECgwP5pel5pys5Zu95pS/5bqcMRswGQYDVQQLDBLmlL/lupzo
+qo3oqLzln7rnm6QxKTAnBgNVBAMMIOOCouODl+ODquOCseODvOOCt+ODp+ODs0NB
+MiBSb290MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAH+aCXWs
+B9FydC53VzDCBJzUgKaD56WgG5/+q/OAvdVKo6GPtkxgEefK4WCB10jBIFmlYTKL
+nZ6X02aD2mUuWD7b5S+lzYxzplG+WCigeVxpL0PfY7KJR8q73rk0EWOgDiUX5Yf0
+HbCwpc9BqHTG6FPVQvSCLVMJEWgmcZR1E02qdog8dLHW40xPYsNJTE5t8XB+w3+m
+Bcx4m+mB26jIx1ye/JKSLaaX8ji1bnOVDMA/zqaUMLX6BbfeniCq/BNkyYq6ZO/i
+Y+TYmK5rtT6mVbgzPixy+ywRAPtbFi+E0hOe+gXFwctyTiLdhMpLvNIthhoEdlkf
+SUJiOxMfFui61/0=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
+AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
+FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
+REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
+Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
+VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
+SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
+cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
+eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
+A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
+DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
+vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
+DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
+maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
+lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
+MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
+VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
+ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
+AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
+661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
+am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
+ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
+PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
+3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
+SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
+3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
+ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
+StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
+Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
+jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIJmzCCB4OgAwIBAgIBATANBgkqhkiG9w0BAQwFADCCAR4xPjA8BgNVBAMTNUF1
+dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s
+YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz
+dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0
+aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh
+IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ
+KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyMjE4MDgy
+MVoXDTMwMTIxNzIzNTk1OVowggEeMT4wPAYDVQQDEzVBdXRvcmlkYWQgZGUgQ2Vy
+dGlmaWNhY2lvbiBSYWl6IGRlbCBFc3RhZG8gVmVuZXpvbGFubzELMAkGA1UEBhMC
+VkUxEDAOBgNVBAcTB0NhcmFjYXMxGTAXBgNVBAgTEERpc3RyaXRvIENhcGl0YWwx
+NjA0BgNVBAoTLVNpc3RlbWEgTmFjaW9uYWwgZGUgQ2VydGlmaWNhY2lvbiBFbGVj
+dHJvbmljYTFDMEEGA1UECxM6U3VwZXJpbnRlbmRlbmNpYSBkZSBTZXJ2aWNpb3Mg
+ZGUgQ2VydGlmaWNhY2lvbiBFbGVjdHJvbmljYTElMCMGCSqGSIb3DQEJARYWYWNy
+YWl6QHN1c2NlcnRlLmdvYi52ZTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
+ggIBAME77xNS8ZlW47RsBeEaaRZhJoZ4rw785UAFCuPZOAVMqNS1wMYqzy95q6Gk
+UO81ER/ugiQX/KMcq/4HBn83fwdYWxPZfwBfK7BP2p/JsFgzYeFP0BXOLmvoJIzl
+Jb6FW+1MPwGBjuaZGFImWZsSmGUclb51mRYMZETh9/J5CLThR1exStxHQptwSzra
+zNFpkQY/zmj7+YZNA9yDoroVFv6sybYOZ7OxNDo7zkSLo45I7gMwtxqWZ8VkJZkC
+8+p0dX6mkhUT0QAV64Zc9HsZiH/oLhEkXjhrgZ28cF73MXIqLx1fyM4kPH1yOJi/
+R72nMwL7D+Sd6mZgI035TxuHXc2/uOwXfKrrTjaJDz8Jp6DdessOkxIgkKXRjP+F
+K3ze3n4NUIRGhGRtyvEjK95/2g02t6PeYiYVGur6ruS49n0RAaSS0/LJb6XzaAAe
+0mmO2evnEqxIKwy2mZRNPfAVW1l3wCnWiUwryBU6OsbFcFFrQm+00wOicXvOTHBM
+aiCVAVZTb9RSLyi+LJ1llzJZO3pq3IRiiBj38Nooo+2ZNbMEciSgmig7YXaUcmud
+SVQvLSL+Yw+SqawyezwZuASbp7d/0rutQ59d81zlbMt3J7yB567rT2IqIydQ8qBW
+k+fmXzghX+/FidYsh/aK+zZ7Wy68kKHuzEw1Vqkat5DGs+VzAgMBAAGjggLeMIIC
+2jASBgNVHRMBAf8ECDAGAQH/AgECMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52
+ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0wMB0GA1UdDgQWBBStuyIdxuDS
+Aaj9dlBSk+2YwU2u0zCCAVAGA1UdIwSCAUcwggFDgBStuyIdxuDSAaj9dlBSk+2Y
+wU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRpZmlj
+YWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAw
+DgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYD
+VQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25p
+Y2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEgZGUgU2VydmljaW9zIGRlIENl
+cnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG9w0BCQEWFmFjcmFpekBz
+dXNjZXJ0ZS5nb2IudmWCAQEwDgYDVR0PAQH/BAQDAgEGMDcGA1UdEQQwMC6CD3N1
+c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0wMFQGA1Ud
+HwRNMEswJKAioCCGHmhodHA6Ly93d3cuc3VzY2VydGUuZ29iLnZlL2xjcjAjoCGg
+H4YdbGRhcDovL2FjcmFpei5zdXNjZXJ0ZS5nb2IudmUwNwYIKwYBBQUHAQEEKzAp
+MCcGCCsGAQUFBzABhhtoaHRwOi8vb2NzcC5zdXNjZXJ0ZS5nb2IudmUwQAYDVR0g
+BDkwNzA1BgVghl4BAjAsMCoGCCsGAQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRl
+LmdvYi52ZS9kcGMwDQYJKoZIhvcNAQEMBQADggIBAK4qy/zmZ9zBwfW3yOYtLcBT
+Oy4szJyPz7/RhNH3bPVH7HbDTGpi6JZ4YXdXMBeJE5qBF4a590Kgj8Rlnltt+Rbo
+OFQOU1UDqKuTdBsA//Zry5899fmn8jBUkg4nh09jhHHbLlaUScdz704Zz2+UVg7i
+s/r3Legxap60KzmdrmTAE9VKte1TQRgavQwVX5/2mO/J+SCas//UngI+h8SyOucq
+mjudYEgBrZaodUsagUfn/+AzFNrGLy+al+5nZeHb8JnCfLHWS0M9ZyhgoeO/czyn
+99+5G93VWNv4zfc4KiavHZKrkn8F9pg0ycIZh+OwPT/RE2zq4gTazBMlP3ACIe/p
+olkNaOEa8KvgzW96sjBZpMW49zFmyINYkcj+uaNCJrVGsXgdBmkuRGJNWFZ9r0cG
+woIaxViFBypsz045r1ESfYPlfDOavBhZ/giR/Xocm9CHkPRY2BApMMR0DUCyGETg
+Ql+L3kfdTKzuDjUp2DM9FqysQmaM81YDZufWkMhlZPfHwC7KbNougoLroa5Umeos
+bqAXWmk46SwIdWRPLLqbUpDTKooynZKpSYIkkotdgJoVZUUCY+RCO8jsVPEU6ece
+SxztNUm5UOta1OJPMwSAKRHOo3ilVb9c6lAixDdvV8MeNbqe6asM1mpCHWbJ/0rg
+5Ls9Cxx8hracyp0ev7b0
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIIKv++n6Lw6YcwDQYJKoZIhvcNAQEFBQAwKDELMAkGA1UE
+BhMCQkUxGTAXBgNVBAMTEEJlbGdpdW0gUm9vdCBDQTIwHhcNMDcxMDA0MTAwMDAw
+WhcNMjExMjE1MDgwMDAwWjAoMQswCQYDVQQGEwJCRTEZMBcGA1UEAxMQQmVsZ2l1
+bSBSb290IENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMZzQh6S
+/3UPi790hqc/7bIYLS2X+an7mEoj39WN4IzGMhwWLQdC1i22bi+n9fzGhYJdld61
+IgDMqFNAn68KNaJ6x+HK92AQZw6nUHMXU5WfIp8MXW+2QbyM69odRr2nlL/zGsvU
++40OHjPIltfsjFPekx40HopQcSZYtF3CiInaYNKJIT/e1wEYNm7hLHADBGXvmAYr
+XR5i3FVr/mZkIV/4L+HXmymvb82fqgxG0YjFnaKVn6w/Fa7yYd/vw2uaItgscf1Y
+HewApDgglVrH1Tdjuk+bqv5WRi5j2Qsj1Yr6tSPwiRuhFA0m2kHwOI8w7QUmecFL
+TqG4flVSOmlGhHUCAwEAAaOBuzCBuDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zBCBgNVHSAEOzA5MDcGBWA4CQEBMC4wLAYIKwYBBQUHAgEWIGh0dHA6
+Ly9yZXBvc2l0b3J5LmVpZC5iZWxnaXVtLmJlMB0GA1UdDgQWBBSFiuv0xbu+DlkD
+lN7WgAEV4xCcOTARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUhYrr9MW7
+vg5ZA5Te1oABFeMQnDkwDQYJKoZIhvcNAQEFBQADggEBAFHYhd27V2/MoGy1oyCc
+UwnzSgEMdL8rs5qauhjyC4isHLMzr87lEwEnkoRYmhC598wUkmt0FoqW6FHvv/pK
+JaeJtmMrXZRY0c8RcrYeuTlBFk0pvDVTC9rejg7NqZV3JcqUWumyaa7YwBO+mPyW
+nIR/VRPmPIfjvCCkpDZoa01gZhz5v6yAlGYuuUGK02XThIAC71AdXkbc98m6tTR8
+KvPG2F9fVJ3bTc0R5/0UAoNmXsimABKgX77OFP67H6dh96tK8QYUn8pJQsKpvO2F
+sauBQeYNxUJpU4c5nUwfAA4+Bw11V0SoU7Q2dmSZ3G7rPUZuFF1eR1ONeE3gJ7uO
+hXY=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
+L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
+1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
+MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
+QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
+arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
+Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
+FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
+P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
+9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
+uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
+9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
+A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
+OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
+KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
+DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
+H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
+I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
+5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
+3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
+Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
+ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
+N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
+tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
+0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
+/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
+KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
+zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
+O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
+K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
+Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
+QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
+cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
+IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
+HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
+O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
+dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
+kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
+3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
+u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
+4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQy
+MDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjEw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy3QRk
+D2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/o
+OI7bm+V8u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3A
+fQ+lekLZWnDZv6fXARz2m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJe
+IgpFy4QxTaz+29FHuvlglzmxZcfe+5nkCiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8n
+oc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTaYVKvJrT1cU/J19IG32PK
+/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6vpmumwKj
+rckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD
+3AjLLhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE
+7cderVC6xkGbrPAXZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkC
+yC2fg69naQanMVXVz0tv/wQFx1isXxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLd
+qvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ04IwDQYJKoZI
+hvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR
+xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaA
+SfX8MPWbTx9BLxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXo
+HqJPYNcHKfyyo6SdbhWSVhlMCrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpB
+emOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5GfbVSUZP/3oNn6z4eGBrxEWi1CXYBmC
+AMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85YmLLW1AL14FABZyb
+7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKSds+x
+DzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvk
+F7mGnjixlAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqF
+a3qdnom2piiZk4hA9z7NUaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsT
+Q6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJa7+h89n07eLw4+1knj0vllJPgFOL
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
+MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
+NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
+PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
+QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
+yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
+QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
+H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
+QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
+i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
+nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
+rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
+GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
+lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
+TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
+gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
+G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
+L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET
+MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk
+BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4
+Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl
+cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0
+aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY
+F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N
+8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe
+rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K
+/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu
+7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC
+28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6
+lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E
+nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB
+0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09
+5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj
+WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN
+jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ
+KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s
+ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM
+OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q
+619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn
+2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj
+o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v
+nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG
+5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq
+pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb
+dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0
+BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET
+MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb
+BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz
+MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx
+FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g
+Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2
+fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl
+LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV
+WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF
+TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb
+5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc
+CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri
+wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ
+wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG
+m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4
+F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng
+WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0
+2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF
+AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/
+0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw
+F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS
+g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj
+qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN
+h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/
+ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V
+btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj
+Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ
+8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW
+gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM
+MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD
+QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM
+MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD
+QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E
+jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo
+ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI
+ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu
+Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg
+AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7
+HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA
+uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa
+TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg
+xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q
+CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x
+O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs
+6GAqm4VKQPNriiTsBhYscw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
+gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
+QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
+A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
+OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
+VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
+b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
+DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
+0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
+OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
+fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
+Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
+o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
+sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
+OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
+Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
+adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
+3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
+CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
+XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
+djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
+WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
+AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
+P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
+b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
+XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
+5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
+DrW5viSP
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
+xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
+Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
+5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
+/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
+AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
+5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz
+IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz
+MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj
+dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw
+EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp
+MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9
+28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq
+VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q
+DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR
+5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL
+ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a
+Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl
+UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s
++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5
+Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
+ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx
+hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV
+HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1
++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN
+YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t
+L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy
+ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt
+IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV
+HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w
+DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW
+PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF
+5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1
+glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH
+FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2
+pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD
+xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG
+tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq
+jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De
+fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
+OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ
+d0jQ
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEn
+MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL
+ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMg
+b2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAxNjEzNDNaFw0zNzA5MzAxNjEzNDRa
+MH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZpcm1hIFNBIENJRiBB
+ODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3JnMSIw
+IAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0B
+AQEFAAOCAQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtb
+unXF/KGIJPov7coISjlUxFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0d
+BmpAPrMMhe5cG3nCYsS4No41XQEMIwRHNaqbYE6gZj3LJgqcQKH0XZi/caulAGgq
+7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jWDA+wWFjbw2Y3npuRVDM3
+0pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFVd9oKDMyX
+roDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIG
+A1UdEwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5j
+aGFtYmVyc2lnbi5vcmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p
+26EpW1eLTXYGduHRooowDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIA
+BzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hhbWJlcnNpZ24ub3JnMCcGA1Ud
+EgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYDVR0gBFEwTzBN
+BgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz
+aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEB
+AAxBl8IahsAifJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZd
+p0AJPaxJRUXcLo0waLIJuvvDL8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi
+1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wNUPf6s+xCX6ndbcj0dc97wXImsQEc
+XCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/nADydb47kMgkdTXg0
+eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1erfu
+tGWaIZDgqtCYvDi1czyL+Nw=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDQzCCAiugAwIBAgIQX/h7KCtU3I1CoxW1aMmt/zANBgkqhkiG9w0BAQUFADA1
+MRYwFAYDVQQKEw1DaXNjbyBTeXN0ZW1zMRswGQYDVQQDExJDaXNjbyBSb290IENB
+IDIwNDgwHhcNMDQwNTE0MjAxNzEyWhcNMjkwNTE0MjAyNTQyWjA1MRYwFAYDVQQK
+Ew1DaXNjbyBTeXN0ZW1zMRswGQYDVQQDExJDaXNjbyBSb290IENBIDIwNDgwggEg
+MA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCwmrmrp68Kd6ficba0ZmKUeIhH
+xmJVhEAyv8CrLqUccda8bnuoqrpu0hWISEWdovyD0My5jOAmaHBKeN8hF570YQXJ
+FcjPFto1YYmUQ6iEqDGYeJu5Tm8sUxJszR2tKyS7McQr/4NEb7Y9JHcJ6r8qqB9q
+VvYgDxFUl4F1pyXOWWqCZe+36ufijXWLbvLdT6ZeYpzPEApk0E5tzivMW/VgpSdH
+jWn0f84bcN5wGyDWbs2mAag8EtKpP6BrXruOIIt6keO1aO6g58QBdKhTCytKmg9l
+Eg6CTY5j/e/rmxrbU6YTYK/CfdfHbBcl1HP7R2RQgYCUTOG/rksc35LtLgXfAgED
+o1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUJ/PI
+FR5umgIJFq0roIlgX9p7L6owEAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEF
+BQADggEBAJ2dhISjQal8dwy3U8pORFBi71R803UXHOjgxkhLtv5MOhmBVrBW7hmW
+Yqpao2TB9k5UM8Z3/sUcuuVdJcr18JOagxEu5sv4dEX+5wW4q+ffy0vhN4TauYuX
+cB7w4ovXsNgOnbFp1iqRe6lJT37mjpXYgyc81WhJDtSd9i7rp77rMKSsH0T8lasz
+Bvt9YAretIpjsJyp8qS5UwGH0GikJ3+r/+n6yUA4iGe0OcaEb1fJU9u6ju7AQ7L4
+CYNu/2bPPu8Xs1gYJQk0XuPL1hS27PKSb3TkL4Eq1ZKR4OCXPDJoBYVL0fdX4lId
+kxpUnwVwwEpxYB5DC2Ae/qPOgRnhCzU=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw
+PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz
+cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9
+MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz
+IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ
+ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR
+VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL
+kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd
+EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas
+H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0
+HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud
+DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4
+QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu
+Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/
+AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8
+yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR
+FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA
+ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB
+kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7
+l7+ijrRU
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDoTCCAomgAwIBAgIQKTZHquOKrIZKI1byyrdhrzANBgkqhkiG9w0BAQUFADBO
+MQswCQYDVQQGEwJ1czEYMBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQ0wCwYDVQQL
+EwRGQkNBMRYwFAYDVQQDEw1Db21tb24gUG9saWN5MB4XDTA3MTAxNTE1NTgwMFoX
+DTI3MTAxNTE2MDgwMFowTjELMAkGA1UEBhMCdXMxGDAWBgNVBAoTD1UuUy4gR292
+ZXJubWVudDENMAsGA1UECxMERkJDQTEWMBQGA1UEAxMNQ29tbW9uIFBvbGljeTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJeNvTMn5K1b+3i9L0dHbsd4
+6ZOcpN7JHP0vGzk4rEcXwH53KQA7Ax9oD81Npe53uCxiazH2+nIJfTApBnznfKM9
+hBiKHa4skqgf6F5PjY7rPxr4nApnnbBnTfAu0DDew5SwoM8uCjR/VAnTNr2kSVdS
+c+md/uRIeUYbW40y5KVIZPMiDZKdCBW/YDyD90ciJSKtKXG3d+8XyaK2lF7IMJCk
+FEhcVlcLQUwF1CpMP64Sm1kRdXAHImktLNMxzJJ+zM2kfpRHqpwJCPZLr1LoakCR
+xVW9QLHIbVeGlRfmH3O+Ry4+i0wXubklHKVSFzYIWcBCvgortFZRPBtVyYyQd+sC
+AwEAAaN7MHkwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFC9Yl9ipBZilVh/72at17wI8NjTHMBIGCSsGAQQBgjcVAQQFAgMBAAEwIwYJ
+KwYBBAGCNxUCBBYEFHa3YJbdFFYprHWF03BjwbxHhhyLMA0GCSqGSIb3DQEBBQUA
+A4IBAQBgrvNIFkBypgiIybxHLCRLXaCRc+1leJDwZ5B6pb8KrbYq+Zln34PFdx80
+CTj5fp5B4Ehg/uKqXYeI6oj9XEWyyWrafaStsU+/HA2fHprA1RRzOCuKeEBuMPdi
+4c2Z/FFpZ2wR3bgQo2jeJqVW/TZsN5hs++58PGxrcD/3SDcJjwtCga1GRrgLgwb0
+Gzigf0/NC++DiYeXHIowZ9z9VKEDfgHLhUyxCynDvux84T8PCVI8L6eaSP436REG
+WOE2QYrEtr+O3c5Ks7wawM36GpnScZv6z7zyxFSjiDV2zBssRm8MtNHDYXaSdBHq
+S4CNHIkRi+xb/xfJSPzn4AYR4oRe
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
+hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
+BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
+6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
+pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
+9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
+/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
+Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
+qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
+SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
+u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
+Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
+crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
+FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
+/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
+wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
+4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
+2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
+FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
+boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
+jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
+S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
+QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
+0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
+NVOFBkpdn627G190
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDkzCCAnugAwIBAgIQFBOWgxRVjOp7Y+X8NId3RDANBgkqhkiG9w0BAQUFADA0
+MRMwEQYDVQQDEwpDb21TaWduIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQG
+EwJJTDAeFw0wNDAzMjQxMTMyMThaFw0yOTAzMTkxNTAyMThaMDQxEzARBgNVBAMT
+CkNvbVNpZ24gQ0ExEDAOBgNVBAoTB0NvbVNpZ24xCzAJBgNVBAYTAklMMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8ORUaSvTx49qROR+WCf4C9DklBKK
+8Rs4OC8fMZwG1Cyn3gsqrhqg455qv588x26i+YtkbDqthVVRVKU4VbirgwTyP2Q2
+98CNQ0NqZtH3FyrV7zb6MBBC11PN+fozc0yz6YQgitZBJzXkOPqUm7h65HkfM/sb
+2CEJKHxNGGleZIp6GZPKfuzzcuc3B1hZKKxC+cX/zT/npfo4sdAMx9lSGlPWgcxC
+ejVb7Us6eva1jsz/D3zkYDaHL63woSV9/9JLEYhwVKZBqGdTUkJe5DSe5L6j7Kpi
+Xd3DTKaCQeQzC6zJMw9kglcq/QytNuEMrkvF7zuZ2SOzW120V+x0cAwqTwIDAQAB
+o4GgMIGdMAwGA1UdEwQFMAMBAf8wPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDovL2Zl
+ZGlyLmNvbXNpZ24uY28uaWwvY3JsL0NvbVNpZ25DQS5jcmwwDgYDVR0PAQH/BAQD
+AgGGMB8GA1UdIwQYMBaAFEsBmz5WGmU2dst7l6qSBe4y5ygxMB0GA1UdDgQWBBRL
+AZs+VhplNnbLe5eqkgXuMucoMTANBgkqhkiG9w0BAQUFAAOCAQEA0Nmlfv4pYEWd
+foPPbrxHbvUanlR2QnG0PFg/LUAlQvaBnPGJEMgOqnhPOAlXsDzACPw1jvFIUY0M
+cXS6hMTXcpuEfDhOZAYnKuGntewImbQKDdSFc8gS4TXt8QUxHXOZDOuWyt3T5oWq
+8Ir7dcHyCTxlZWTzTNity4hp8+SDtwy9F1qWF8pb/627HOkthIDYIb6FUtnUdLlp
+hbpN7Sgy6/lhSuTENh4Z3G+EER+V9YMoGKgzkkMn3V0TBEVPh9VGzT2ouvDzuFYk
+Res3x+F2T3I5GN9+dHLHcy056mDmrRGiVod7w2ia/viMcKjfZTL0pECMocJEAw6U
+AGegcQCCSA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGATCCA+mgAwIBAgIRAI9hcRW6eVgXjH0ROqzW264wDQYJKoZIhvcNAQELBQAw
+RTEfMB0GA1UEAxMWQ29tU2lnbiBHbG9iYWwgUm9vdCBDQTEVMBMGA1UEChMMQ29t
+U2lnbiBMdGQuMQswCQYDVQQGEwJJTDAeFw0xMTA3MTgxMDI0NTRaFw0zNjA3MTYx
+MDI0NTVaMEUxHzAdBgNVBAMTFkNvbVNpZ24gR2xvYmFsIFJvb3QgQ0ExFTATBgNV
+BAoTDENvbVNpZ24gTHRkLjELMAkGA1UEBhMCSUwwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQCyKClzKh3rm6n1nvigmV/VU1D4hSwYW2ro3VqpzpPo0Ph3
+3LguqjXd5juDwN4mpxTpD99d7Xu5X6KGTlMVtfN+bTbA4t3x7DU0Zqn0BE5XuOgs
+3GLH41Vmr5wox1bShVpM+IsjcN4E/hMnDtt/Bkb5s33xCG+ohz5dlq0gA9qfr/g4
+O9lkHZXTCeYrmVzd/il4x79CqNvGkdL3um+OKYl8rg1dPtD8UsytMaDgBAopKR+W
+igc16QJzCbvcinlETlrzP/Ny76BWPnAQgaYBULax/Q5thVU+N3sEOKp6uviTdD+X
+O6i96gARU4H0xxPFI75PK/YdHrHjfjQevXl4J37FJfPMSHAbgPBhHC+qn/014DOx
+46fEGXcdw2BFeIIIwbj2GH70VyJWmuk/xLMCHHpJ/nIF8w25BQtkPpkwESL6esaU
+b1CyB4Vgjyf16/0nRiCAKAyC/DY/Yh+rDWtXK8c6QkXD2XamrVJo43DVNFqGZzbf
+5bsUXqiVDOz71AxqqK+p4ek9374xPNMJ2rB5MLPAPycwI0bUuLHhLy6nAIFHLhut
+TNI+6Y/soYpi5JSaEjcY7pxI8WIkUAzr2r+6UoT0vAdyOt7nt1y8844a7szo/aKf
+woziHl2O1w6ZXUC30K+ptXVaOiW79pBDcbLZ9ZdbONhS7Ea3iH4HJNwktrBJLQID
+AQABo4HrMIHoMA8GA1UdEwEB/wQFMAMBAf8wgYQGA1UdHwR9MHswPKA6oDiGNmh0
+dHA6Ly9mZWRpci5jb21zaWduLmNvLmlsL2NybC9jb21zaWduZ2xvYmFscm9vdGNh
+LmNybDA7oDmgN4Y1aHR0cDovL2NybDEuY29tc2lnbi5jby5pbC9jcmwvY29tc2ln
+bmdsb2JhbHJvb3RjYS5jcmwwDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBQCRZPY
+DUhirGm6rgZbPvuqJpFQsTAfBgNVHSMEGDAWgBQCRZPYDUhirGm6rgZbPvuqJpFQ
+sTANBgkqhkiG9w0BAQsFAAOCAgEAk1V5V9701xsfy4mfX+tP9Ln5e9h3N+QMwUfj
+kr+k3e8iXOqADjTpUHeBkEee5tJq09ZLp/43F5tZ2eHdYq2ZEX7iWHCnOQet6Yw9
+SU1TahsrGDA6JJD9sdPFnNZooGsU1520e0zNB0dNWwxrWAmu4RsBxvEpWCJbvzQL
+dOfyX85RWwli81OiVMBc5XvJ1mxsIIqli45oRynKtsWP7E+b0ISJ1n+XFLdQo/Nm
+WA/5sDfT0F5YPzWdZymudMbXitimxC+n4oQE4mbQ4Zm718Iwg3pP9gMMcSc7Qc1J
+kJHPH9O7gVubkKHuSYj9T3Ym6c6egL1pb4pz/uT7cT26Fiopc/jdqbe2EAfoJZkv
+hlp/zdzOoXTWjiKNA5zmgWnZn943FuE9KMRyKtyi/ezJXCh8ypnqLIKxeFfZl69C
+BwJsPXUTuqj8Fic0s3aZmmr7C4jXycP+Q8V+akMEIoHAxcd960b4wVWKqOcI/kZS
+Q0cYqWOY1LNjznRt9lweWEfwDBL3FhrHOmD4++1N3FkkM4W+Q1b2WOL24clDMj+i
+2n9Iw0lc1llHMSMvA5D0vpsXZpOgcCVahfXczQKi9wQ3oZyonJeWx4/rXdMtagAB
+VBYGFuMEUEQtybI+eIbnp5peO2WAAblQI4eTy/jMVowe5tfMEXovV3sz9ULgmGb3
+DscLP1I=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAw
+PDEbMBkGA1UEAxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWdu
+MQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwx
+GzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBDQTEQMA4GA1UEChMHQ29tU2lnbjEL
+MAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGtWhf
+HZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs49oh
+gHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sW
+v+bznkqH7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ue
+Mv5WJDmyVIRD9YTC2LxBkMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr
+9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d19guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt
+6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUwAwEB/zBEBgNVHR8EPTA7
+MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29tU2lnblNl
+Y3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58
+ADsAj8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkq
+hkiG9w0BAQUFAAOCAQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7p
+iL1DRYHjZiM/EoZNGeQFsOY3wo3aBijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtC
+dsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtpFhpFfTMDZflScZAmlaxMDPWL
+kz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP51qJThRv4zdL
+hfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz
+OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
+ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
+HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
+UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
+tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
+ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
+lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
+/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
+A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
+A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
+dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
+MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
+cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
+L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
+BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
+acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
+o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
+zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
+PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
+Johw1+qRzT65ysCQblrGXnRl11z+o+I=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
+NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
+ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
+3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
+qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
+p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
+HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
+HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
+Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
+c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
+RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
+Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
+3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
+xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
+KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc
+MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj
+IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB
+IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE
+RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl
+U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290
+IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU
+ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC
+QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr
+rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S
+NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc
+QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH
+txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP
+BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC
+AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp
+tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa
+IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl
+6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+
+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU
+Cm26OWMohpLzGITY+9HPBVZkVw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
+n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
+biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
+bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
+YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
+AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
+QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
+0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
+lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
+B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
+ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
+IhNzbM8m9Yop5w==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
+Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
+RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
+AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
+JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
+6pZjamVFkpUBtA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
+MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
+2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
+1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
+tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
+vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
+5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
+1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
+NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
+Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
+8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
+pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
+MrY=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
+Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
+EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
+IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
+fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
+BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
+AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
+sycX
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
+RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
+ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
+ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
+jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
+CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
+EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
+fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
+uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
+chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
+9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
+SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
+fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
+sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
+cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
+0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
+4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
+r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
+/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
+gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBb
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3Qx
+ETAPBgNVBAsTCERTVCBBQ0VTMRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0w
+MzExMjAyMTE5NThaFw0xNzExMjAyMTE5NThaMFsxCzAJBgNVBAYTAlVTMSAwHgYD
+VQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UECxMIRFNUIEFDRVMx
+FzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPu
+ktKe1jzIDZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7
+gLFViYsx+tC3dr5BPTCapCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZH
+fAjIgrrep4c9oW24MFbCswKBXy314powGCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4a
+ahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPyMjwmR/onJALJfh1biEIT
+ajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1UdEwEB/wQF
+MAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rk
+c3QuY29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjto
+dHRwOi8vd3d3LnRydXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMt
+aW5kZXguaHRtbDAdBgNVHQ4EFgQUCXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZI
+hvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V25FYrnJmQ6AgwbN99Pe7lv7Uk
+QIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6tFr8hlxCBPeP/
+h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq
+nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpR
+rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2
+9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/
+MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
+DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow
+PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
+Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O
+rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq
+OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b
+xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw
+7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD
+aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG
+SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69
+ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr
+AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz
+R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5
+JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo
+Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDOzCCAiOgAwIBAgIRANAeRlAAACmMAAAAAgAAAAIwDQYJKoZIhvcNAQEFBQAw
+PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
+Ew5EU1QgUm9vdCBDQSBYNDAeFw0wMDA5MTMwNjIyNTBaFw0yMDA5MTMwNjIyNTBa
+MD8xJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0dXJlIFRydXN0IENvLjEXMBUGA1UE
+AxMORFNUIFJvb3QgQ0EgWDQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCthX3OFEYY8gSeIYur0O4ypOT68HnDrjLfIutL5PZHRwQGjzCPb9PFo/ihboJ8
+RvfGhBAqpQCo47zwYEhpWm1jB+L/OE/dBBiyn98krfU2NiBKSom2J58RBeAwHGEy
+cO+lewyjVvbDDLUy4CheY059vfMjPAftCRXjqSZIolQb9FdPcAoa90mFwB7rKniE
+J7vppdrUScSS0+eBrHSUPLdvwyn4RGp+lSwbWYcbg5EpSpE0GRJdchic0YDjvIoC
+YHpe7Rkj93PYRTQyU4bhC88ck8tMqbvRYqMRqR+vobbkrj5LLCOQCHV5WEoxWh+0
+E2SpIFe7RkV++MmpIAc0h1tZAgMBAAGjMjAwMA8GA1UdEwEB/wQFMAMBAf8wHQYD
+VR0OBBYEFPCD6nPIP1ubWzdf9UyPWvf0hki9MA0GCSqGSIb3DQEBBQUAA4IBAQCE
+G85wl5eEWd7adH6XW/ikGN5salvpq/Fix6yVTzE6CrhlP5LBdkf6kx1bSPL18M45
+g0rw2zA/MWOhJ3+S6U+BE0zPGCuu8YQaZibR7snm3HiHUaZNMu5c8D0x0bcMxDjY
+AVVcHCoNiL53Q4PLW27nbY6wwG0ffFKmgV3blxrYWfuUDgGpyPwHwkfVFvz9qjaV
+mf12VJffL6W8omBPtgteb6UaT/k1oJ7YI0ldGf+ngpVbRhD+LC3cUtT6GO/BEPZu
+8YTV/hbiDH5v3khVqMIeKT6o8IuXGG7F6a6vKwP1F1FwTXf4UC/ivhme7vdUH7B/
+Vv4AEbT8dNfEeFxrkDbh
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
+BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
+aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
+BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
+Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
+MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
+em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
+ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
+B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
+D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
+Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
+q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
+k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
+fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
+dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
+ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
+zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
+rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
+U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
+Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
+XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
+Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
+HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
+GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
+77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
+FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
+yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
+AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
+y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
+NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIE5zCCA8+gAwIBAgIBADANBgkqhkiG9w0BAQUFADCBjTELMAkGA1UEBhMCQ0Ex
+EDAOBgNVBAgTB09udGFyaW8xEDAOBgNVBAcTB1Rvcm9udG8xHTAbBgNVBAoTFEVj
+aG93b3J4IENvcnBvcmF0aW9uMR8wHQYDVQQLExZDZXJ0aWZpY2F0aW9uIFNlcnZp
+Y2VzMRowGAYDVQQDExFFY2hvd29yeCBSb290IENBMjAeFw0wNTEwMDYxMDQ5MTNa
+Fw0zMDEwMDcxMDQ5MTNaMIGNMQswCQYDVQQGEwJDQTEQMA4GA1UECBMHT250YXJp
+bzEQMA4GA1UEBxMHVG9yb250bzEdMBsGA1UEChMURWNob3dvcnggQ29ycG9yYXRp
+b24xHzAdBgNVBAsTFkNlcnRpZmljYXRpb24gU2VydmljZXMxGjAYBgNVBAMTEUVj
+aG93b3J4IFJvb3QgQ0EyMIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEA
+utU/5BkV15UBf+s+JQruKQxr77s3rjp/RpOtmhHILIiO5gsEWP8MMrfrVEiidjI6
+Qh6ans0KAWc2Dw0/j4qKAQzOSyAZgjcdypNTBZ7muv212DA2Pu41rXqwMrlBrVi/
+KTghfdLlNRu6JrC5y8HarrnRFSKF1Thbzz921kLDRoCi+FVs5eVuK5LvIfkhNAqA
+byrTgO3T9zfZgk8upmEkANPDL1+8y7dGPB/d6lk0I5mv8PESKX02TlvwgRSIiTHR
+k8++iOPLBWlGp7ZfqTEXkPUZhgrQQvxcrwCUo6mk8TqgxCDP5FgPoHFiPLef5szP
+ZLBJDWp7GLyE1PmkQI6WiwIBA6OCAVAwggFMMA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBQ74YEboKs/OyGC1eISrq5QqxSlEzCBugYDVR0j
+BIGyMIGvgBQ74YEboKs/OyGC1eISrq5QqxSlE6GBk6SBkDCBjTELMAkGA1UEBhMC
+Q0ExEDAOBgNVBAgTB09udGFyaW8xEDAOBgNVBAcTB1Rvcm9udG8xHTAbBgNVBAoT
+FEVjaG93b3J4IENvcnBvcmF0aW9uMR8wHQYDVQQLExZDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzMRowGAYDVQQDExFFY2hvd29yeCBSb290IENBMoIBADBQBgNVHSAESTBH
+MEUGCysGAQQB+REKAQMBMDYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuZWNob3dv
+cnguY29tL2NhL3Jvb3QyL2Nwcy5wZGYwDQYJKoZIhvcNAQEFBQADggEBAG+nrPi/
+0RpfEzrj02C6JGPUar4nbjIhcY6N7DWNeqBoUulBSIH/PYGNHYx7/lnJefiixPGE
+7TQ5xPgElxb9bK8zoAApO7U33OubqZ7M7DlHnFeCoOoIAZnG1kuwKwD5CXKB2a74
+HzcqNnFW0IsBFCYqrVh/rQgJOzDA8POGbH0DeD0xjwBBooAolkKT+7ZItJF1Pb56
+QpDL9G+16F7GkmnKlAIYT3QTS3yFGYChnJcd+6txUPhKi9sSOOmAIaKHnkH9Scz+
+A2cSi4A3wUYXVatuVNHpRb2lygfH3SuCX9MU8Ure3zBlSU1LALtMqI4JmcQmQpIq
+zIzvO2jHyu9PQqo=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1
+MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1
+czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG
+CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy
+MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl
+ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS
+b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy
+euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO
+bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw
+WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d
+MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE
+1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/
+zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB
+BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF
+BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV
+v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG
+E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u
+uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW
+iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v
+GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0xOTEy
+MjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo3QwcjARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGA
+vtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdERgL7YibkIozH5oSQJFrlwMB0G
+CSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEA
+WUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo
+oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQ
+h7A6tcOdBTcSo8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18
+f3v/rxzP5tsHrV7bhZ3QKw0z2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfN
+B/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjXOP/swNlQ8C5LWK5Gb9Auw2DaclVy
+vUxFnmG6v4SBkgPR0ml8xQ==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
+MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
+U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
+u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
+bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
+fF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEYDCCA0igAwIBAgICATAwDQYJKoZIhvcNAQELBQAwWTELMAkGA1UEBhMCVVMx
+GDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDENMAsGA1UECxMERlBLSTEhMB8GA1UE
+AxMYRmVkZXJhbCBDb21tb24gUG9saWN5IENBMB4XDTEwMTIwMTE2NDUyN1oXDTMw
+MTIwMTE2NDUyN1owWTELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJu
+bWVudDENMAsGA1UECxMERlBLSTEhMB8GA1UEAxMYRmVkZXJhbCBDb21tb24gUG9s
+aWN5IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2HX7NRY0WkG/
+Wq9cMAQUHK14RLXqJup1YcfNNnn4fNi9KVFmWSHjeavUeL6wLbCh1bI1FiPQzB6+
+Duir3MPJ1hLXp3JoGDG4FyKyPn66CG3G/dFYLGmgA/Aqo/Y/ISU937cyxY4nsyOl
+4FKzXZbpsLjFxZ+7xaBugkC7xScFNknWJidpDDSPzyd6KgqjQV+NHQOGgxXgVcHF
+mCye7Bpy3EjBPvmE0oSCwRvDdDa3ucc2Mnr4MrbQNq4iGDGMUHMhnv6DOzCIJOPp
+wX7e7ZjHH5IQip9bYi+dpLzVhW86/clTpyBLqtsgqyFOHQ1O5piF5asRR12dP8Qj
+wOMUBm7+nQIDAQABo4IBMDCCASwwDwYDVR0TAQH/BAUwAwEB/zCB6QYIKwYBBQUH
+AQsEgdwwgdkwPwYIKwYBBQUHMAWGM2h0dHA6Ly9odHRwLmZwa2kuZ292L2ZjcGNh
+L2NhQ2VydHNJc3N1ZWRCeWZjcGNhLnA3YzCBlQYIKwYBBQUHMAWGgYhsZGFwOi8v
+bGRhcC5mcGtpLmdvdi9jbj1GZWRlcmFsJTIwQ29tbW9uJTIwUG9saWN5JTIwQ0Es
+b3U9RlBLSSxvPVUuUy4lMjBHb3Zlcm5tZW50LGM9VVM/Y0FDZXJ0aWZpY2F0ZTti
+aW5hcnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUrQx6dVzl85jEeZgOrCj9l/TnAvwwDQYJKoZIhvcNAQELBQAD
+ggEBAI9z2uF/gLGH9uwsz9GEYx728Yi3mvIRte9UrYpuGDco71wb5O9Qt2wmGCMi
+TR0mRyDpCZzicGJxqxHPkYnos/UqoEfAFMtOQsHdDA4b8Idb7OV316rgVNdF9IU+
+7LQd3nyKf1tNnJaK0KIyn9psMQz4pO9+c+iR3Ah6cFqgr2KBWfgAdKLI3VTKQVZH
+venAT+0g3eOlCd+uKML80cgX2BLHb94u6b2akfI8WpQukSKAiaGMWMyDeiYZdQKl
+Dn0KJnNR6obLB6jI/WNaNZvSr79PMUjBhHDbNXuaGQ/lj/RqDG8z2esccKIN47lQ
+A2EC/0rskqTcLe4qNJMHtyznGI8=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
+YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
+R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
+9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
+fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
+iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
+1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
+MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
+ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
+uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
+Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
+tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
+PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
+hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
+5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
+MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
+R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
+MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
+Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
+AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
+ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
+7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
+kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
+mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
+KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
+6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
+4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
+oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
+UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
+AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD
+aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx
+MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy
+cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG
+A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl
+BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI
+hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed
+KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7
+G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2
+zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4
+ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG
+HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2
+Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V
+yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e
+beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r
+6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh
+wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog
+zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW
+BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr
+ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp
+ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk
+cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt
+YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC
+CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow
+KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI
+hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ
+UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz
+X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x
+fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz
+a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd
+Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd
+SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O
+AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso
+M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge
+v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z
+09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEn
+MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL
+ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENo
+YW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYxNDE4WhcNMzcwOTMwMTYxNDE4WjB9
+MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgy
+NzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4G
+A1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUA
+A4IBDQAwggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0
+Mi+ITaFgCPS3CU6gSS9J1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/s
+QJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8Oby4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpV
+eAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl6DJWk0aJqCWKZQbua795
+B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c8lCrEqWh
+z0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0T
+AQH/BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1i
+ZXJzaWduLm9yZy9jaGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4w
+TcbOX60Qq+UDpfqpFDAOBgNVHQ8BAf8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAH
+MCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBjaGFtYmVyc2lnbi5vcmcwKgYD
+VR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9yZzBbBgNVHSAE
+VDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh
+bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0B
+AQUFAAOCAQEAPDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUM
+bKGKfKX0j//U2K0X1S0E0T9YgOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXi
+ryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJPJ7oKXqJ1/6v/2j1pReQvayZzKWG
+VwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4IBHNfTIzSJRUTN3c
+ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/
+AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ
+FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F
+uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX
+kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs
+ewv4n4Q=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
+hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
+KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
+515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
+xwy8p2Fp8fc74SrL+SvzZpA3
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
+MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
+v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
+eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
+tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
+C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
+zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
+mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
+V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
+bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
+3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
+J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
+ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
+AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
+TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFSzCCAzOgAwIBAgIRALZLiAfiI+7IXBKtpg4GofIwDQYJKoZIhvcNAQELBQAw
+PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eTAeFw0xMjA5MjgwODU4NTFaFw0zNzEyMzExNTU5NTla
+MD8xCzAJBgNVBAYTAlRXMTAwLgYDVQQKDCdHb3Zlcm5tZW50IFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQC2/5c8gb4BWCQnr44BK9ZykjAyG1+bfNTUf+ihYHMwVxAA+lCWJP5Q5ow6ldFX
+eYTVZ1MMKoI+GFy4MCYa1l7GLbIEUQ7v3wxjR+vEEghRK5lxXtVpe+FdyXcdIOxW
+juVhYC386RyA3/pqg7sFtR4jEpyCygrzFB0g5AaPQySZn7YKk1pzGxY5vgW28Yyl
+ZJKPBeRcdvc5w88tvQ7Yy6gOMZvJRg9nU0MEj8iyyIOAX7ryD6uBNaIgIZfOD4k0
+eA/PH07p+4woPN405+2f0mb1xcoxeNLOUNFggmOd4Ez3B66DNJ1JSUPUfr0t4urH
+cWWACOQ2nnlwCjyHKenkkpTqBpIpJ3jmrdc96QoLXvTg1oadLXLLi2RW5vSueKWg
+OTNYPNyoj420ai39iHPplVBzBN8RiD5C1gJ0+yzEb7xs1uCAb9GGpTJXA9ZN9E4K
+mSJ2fkpAgvjJ5E7LUy3Hsbbi08J1J265DnGyNPy/HE7CPfg26QrMWJqhGIZO4uGq
+s3NZbl6dtMIIr69c/aQCb/+4DbvVq9dunxpPkUDwH0ZVbaCSw4nNt7H/HLPLo5wK
+4/7NqrwB7N1UypHdTxOHpPaY7/1J1lcqPKZc9mA3v9g+fk5oKiMyOr5u5CI9ByTP
+isubXVGzMNJxbc5Gim18SjNE2hIvNkvy6fFRCW3bapcOFwIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBTVZx3gnHosnMvFmOcdByYqhux0zTAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAJA75cJTQijq9TFOjj2Rnk0J
+89ixUuZPrAwxIbvx6pnMg/y2KOTshAcOD06Xu29oRo8OURWV+Do7H1+CDgxxDryR
+T64zLiNB9CZrTxOH+nj2LsIPkQWXqmrBap+8hJ4IKifd2ocXhuGzyl3tOKkpboTe
+Rmv8JxlQpRJ6jH1i/NrnzLyfSa8GuCcn8on3Fj0Y5r3e9YwSkZ/jBI3+BxQaWqw5
+ghvxOBnhY+OvbLamURfr+kvriyL2l/4QOl+UoEtTcT9a4RD4co+WgN2NApgAYT2N
+vC2xR8zaXeEgp4wxXPHj2rkKhkfIoT0Hozymc26Uke1uJDr5yTDRB6iBfSZ9fYTf
+hsmL5a4NHr6JSFEVg5iWL0rrczTXdM3Jb9DCuiv2mv6Z3WAUjhv5nDk8f0OJU+jl
+wqu+Iq0nOJt3KLejY2OngeepaUXrjnhWzAWEx/uttjB8YwWfLYwkf0uLkvw4Hp+g
+pVezbp3YZLhwmmBScMip0P/GnO0QYV7Ngw5u6E0CQUridgR51lQ/ipgyFKDdLZzn
+uoJxo4ZVKZnSKdt1OvfbQ/+2W/u3fjWAjg1srnm3Ni2XUqGwB5wH5Ss2zQOXlL0t
+DjQG/MAWifw3VOTWzz0TBPKR2ck2Lj7FWtClTILD/y58Jnb38/1FoqVuVa4uzM8s
+iTTa9g3nkagQ6hed8vbs
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix
+RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p
+YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw
+NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK
+EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl
+cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz
+dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ
+fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns
+bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD
+75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP
+FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV
+HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp
+5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu
+b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA
+A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p
+6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
+TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7
+dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys
+Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI
+l7WdmplNsDz4SgCbZN2fOUvRJ9e4
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
+FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
+Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
+A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
+b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
+jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
+PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
+ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
+nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
+q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
+MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
+mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
+7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
+oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
+EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
+fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
+AmvZWg==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFHjCCBAagAwIBAgIEAKA3oDANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMC
+Q1oxOjA4BgNVBAMMMUkuQ0EgLSBRdWFsaWZpZWQgQ2VydGlmaWNhdGlvbiBBdXRo
+b3JpdHksIDA5LzIwMDkxLTArBgNVBAoMJFBydm7DrSBjZXJ0aWZpa2HEjW7DrSBh
+dXRvcml0YSwgYS5zLjE9MDsGA1UECww0SS5DQSAtIEFjY3JlZGl0ZWQgUHJvdmlk
+ZXIgb2YgQ2VydGlmaWNhdGlvbiBTZXJ2aWNlczAeFw0wOTA5MDEwMDAwMDBaFw0x
+OTA5MDEwMDAwMDBaMIG3MQswCQYDVQQGEwJDWjE6MDgGA1UEAwwxSS5DQSAtIFF1
+YWxpZmllZCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSwgMDkvMjAwOTEtMCsGA1UE
+CgwkUHJ2bsOtIGNlcnRpZmlrYcSNbsOtIGF1dG9yaXRhLCBhLnMuMT0wOwYDVQQL
+DDRJLkNBIC0gQWNjcmVkaXRlZCBQcm92aWRlciBvZiBDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtTaEy0KC8M9l
+4lSaWHMs4+sVV1LwzyJYiIQNeCrv1HHm/YpGIdY/Z640ceankjQvIX7m23BK4OSC
+6KO8kZYA3zopOz6GFCOKV2PvLukbc+c2imF6kLHEv6qNA8WxhPbR3xKwlHDwB2yh
+Wzo7V3QVgDRG83sugqQntKYC3LnlTGbJpNP+Az72gpO9AHUn/IBhFk4ksc8lYS2L
+9GCy9CsmdKSBP78p9w8Lx7vDLqkDgt1/zBrcUWmSSb7AE/BPEeMryQV1IdI6nlGn
+BhWkXOYf6GSdayJw86btuxC7viDKNrbp44HjQRaSxnp6O3eto1x4DfiYdw/YbJFe
+7EjkxSQBywIDAQABo4IBLjCCASowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
+BAMCAQYwgecGA1UdIASB3zCB3DCB2QYEVR0gADCB0DCBzQYIKwYBBQUHAgIwgcAa
+gb1UZW50byBjZXJ0aWZpa2F0IGplIHZ5ZGFuIGpha28ga3ZhbGlmaWtvdmFueSBz
+eXN0ZW1vdnkgY2VydGlmaWthdCBwb2RsZSB6YWtvbmEgYy4gMjI3LzIwMDAgU2Iu
+IHYgcGxhdG5lbSB6bmVuaS9UaGlzIGlzIHF1YWxpZmllZCBzeXN0ZW0gY2VydGlm
+aWNhdGUgYWNjb3JkaW5nIHRvIEN6ZWNoIEFjdCBOby4gMjI3LzIwMDAgQ29sbC4w
+HQYDVR0OBBYEFHnL0CPpOmdwkXRP01Hi4CD94Sj7MA0GCSqGSIb3DQEBCwUAA4IB
+AQB9laU214hYaBHPZftbDS/2dIGLWdmdSbj1OZbJ8LIPBMxYjPoEMqzAR74tw96T
+i6aWRa5WdOWaS6I/qibEKFZhJAVXX5mkx2ewGFLJ+0Go+eTxnjLOnhVF2V2s+57b
+m8c8j6/bS6Ij6DspcHEYpfjjh64hE2r0aSpZDjGzKFM6YpqsCJN8qYe2X1qmGMLQ
+wvNdjG+nPzCJOOuUEypIWt555ZDLXqS5F7ZjBjlfyDZjEfS2Es9Idok8alf563Mi
+9/o+Ba46wMYOkk3P1IlU0RqCajdbliioACKDztAqubONU1guZVzV8tuMASVzbJeL
+/GAB7ECTwe1RuKrLYtglMKI9
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
+VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
+JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
+3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
+S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
+bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
+T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
+vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
+Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
+dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
+c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
+l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
+iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
+ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
+6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
+LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
+nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
+W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
+AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
+l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
+4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
+mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
+7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
+VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEXzCCA0egAwIBAgIBATANBgkqhkiG9w0BAQUFADCB0DELMAkGA1UEBhMCRVMx
+SDBGBgNVBAoTP0laRU5QRSBTLkEuIC0gQ0lGIEEtMDEzMzcyNjAtUk1lcmMuVml0
+b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFCMEAGA1UEBxM5QXZkYSBkZWwgTWVk
+aXRlcnJhbmVvIEV0b3JiaWRlYSAzIC0gMDEwMTAgVml0b3JpYS1HYXN0ZWl6MRMw
+EQYDVQQDEwpJemVucGUuY29tMR4wHAYJKoZIhvcNAQkBFg9JbmZvQGl6ZW5wZS5j
+b20wHhcNMDMwMTMwMjMwMDAwWhcNMTgwMTMwMjMwMDAwWjCB0DELMAkGA1UEBhMC
+RVMxSDBGBgNVBAoTP0laRU5QRSBTLkEuIC0gQ0lGIEEtMDEzMzcyNjAtUk1lcmMu
+Vml0b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFCMEAGA1UEBxM5QXZkYSBkZWwg
+TWVkaXRlcnJhbmVvIEV0b3JiaWRlYSAzIC0gMDEwMTAgVml0b3JpYS1HYXN0ZWl6
+MRMwEQYDVQQDEwpJemVucGUuY29tMR4wHAYJKoZIhvcNAQkBFg9JbmZvQGl6ZW5w
+ZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1btoCXXhp3xIW
+D+Bxl8nUCxkyiazWfpt0e68t+Qt9+lZjKZSdEw2Omj4qvr+ovRmDXO3iWpWVOWDl
+3JHJjAzFCe8ZEBNDH+QNYwZHmPBaMYFOYFdbAFVHWvys152C308hcFJ6xWWGmjvl
+2eMiEl9P2nR2LWue368DCu+ak7j3gjAXaCOdP1a7Bfr+RW3X2SC5R4Xyp8iHlL5J
+PHJD/WBkLrezwzQPdACw8m9EG7q9kUwlNpL32mROujS3ZkT6mQTzJieLiE3X04s0
+uIUqVkk5MhjcHFf7al0N5CzjtTcnXYJKN2Z9EDVskk4olAdGi46eSoZXbjUOP5gk
+Ej6wVZAXAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG
+MB0GA1UdDgQWBBTqVk/sPIOhFIh4gbIrBSLAB0FbQjANBgkqhkiG9w0BAQUFAAOC
+AQEAYp7mEzzhw6o5Hf5+T5kcI+t4BJyiIWy7vHlLs/G8dLYXO81aN/Mzg928eMTR
+TxxYZL8dd9uwsJ50TVfX6L0R4Dyw6wikh3fHRrat9ufXi63j5K91Ysr7aXqnF38d
+iAgHYkrwC3kuxHBb9C0KBz6h8Q45/KCyN7d37wWAq38yyhPDlaOvyoE6bdUuK5hT
+m5EYA5JmPyrhQ1moDOyueWBAjxzMEMj+OAY1H90cLv6wszsqerxRrdTOHBdv7MjB
+EIpvEEQkXUxVXAzFuuT6m2t91Lfnwfl/IvljHaVC7DlyyhRYHD6D4Rx+4QKp4tWL
+vpw6LkI+gKNJ/YdMCsRZQzEEFA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF8DCCA9igAwIBAgIPBuhGJy8fCo/RhFzjafbVMA0GCSqGSIb3DQEBBQUAMDgx
+CzAJBgNVBAYTAkVTMRQwEgYDVQQKDAtJWkVOUEUgUy5BLjETMBEGA1UEAwwKSXpl
+bnBlLmNvbTAeFw0wNzEyMTMxMzA4MjdaFw0zNzEyMTMwODI3MjVaMDgxCzAJBgNV
+BAYTAkVTMRQwEgYDVQQKDAtJWkVOUEUgUy5BLjETMBEGA1UEAwwKSXplbnBlLmNv
+bTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMnTesoPHqynhugWZWqx
+whtFMnGV2f4QW8yv56V5AY+Jw8ryVXH3d753lPNypCxE2J6SmxQ6oeckkAoKVo7F
+2CaU4dlI4S0+2gpy3aOZFdqBoof0e24md4lYrdbrDLJBenNubdt6eEHpCIgSfocu
+ZhFjbFT7PJ1ywLwu/8K33Q124zrX97RovqL144FuwUZvXY3gTcZUVYkaMzEKsVe5
+o4qYw+w7NMWVQWl+dcI8IMVhulFHoCCQk6GQS/NOfIVFVJrRBSZBsLVNHTO+xAPI
+JXzBcNs79AktVCdIrC/hxKw+yMuSTFM5NyPs0wH54AlETU1kwOENWocivK0bo/4m
+tRXzp/yEGensoYi0RGmEg/OJ0XQGqcwL1sLeJ4VQJsoXuMl6h1YsGgEebL4TrRCs
+tST1OJGh1kva8bvS3ke18byB9llrzxlT6Y0Vy0rLqW9E5RtBz+GGp8rQap+8TI0G
+M1qiheWQNaBiXBZO8OOi+gMatCxxs1gs3nsL2xoP694hHwZ3BgOwye+Z/MC5TwuG
+KP7Suerj2qXDR2kS4Nvw9hmL7Xtw1wLW7YcYKCwEJEx35EiKGsY7mtQPyvp10gFA
+Wo15v4vPS8+qFsGV5K1Mij4XkdSxYuWC5YAEpAN+jb/af6IPl08M0w3719Hlcn4c
+yHf/W5oPt64FRuXxqBbsR6QXAgMBAAGjgfYwgfMwgbAGA1UdEQSBqDCBpYEPaW5m
+b0BpemVucGUuY29tpIGRMIGOMUcwRQYDVQQKDD5JWkVOUEUgUy5BLiAtIENJRiBB
+MDEzMzcyNjAtUk1lcmMuVml0b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFDMEEG
+A1UECQw6QXZkYSBkZWwgTWVkaXRlcnJhbmVvIEV0b3JiaWRlYSAxNCAtIDAxMDEw
+IFZpdG9yaWEtR2FzdGVpejAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUHRxlDqjyJXu0kc/ksbHmvVV0bAUwDQYJKoZIhvcNAQEFBQAD
+ggIBAMeBRm8hGE+gBe/n1bqXUKJg7aWSFBpSm/nxiEqg3Hh10dUflU7F57dp5iL0
++CmoKom+z892j+Mxc50m0xwbRxYpB2iEitL7sRskPtKYGCwkjq/2e+pEFhsqxPqg
+l+nqbFik73WrAGLRne0TNtsiC7bw0fRue0aHwp28vb5CO7dz0JoqPLRbEhYArxk5
+ja2DUBzIgU+9Ag89njWW7u/kwgN8KRwCfr00J16vU9adF79XbOnQgxCvv11N75B7
+XSus7Op9ACYXzAJcY9cZGKfsK8eKPlgOiofmg59OsjQerFQJTx0CCzl+gQgVuaBp
+E8gyK+OtbBPWg50jLbJtooiGfqgNASYJQNntKE6MkyQP2/EeTXp6WuKlWPHcj1+Z
+ggwuz7LdmMySlD/5CbOlliVbN/UShUHiGUzGigjB3Bh6Dx4/glmimj4/+eAJn/3B
+kUtdyXvWton83x18hqrNA/ILUpLxYm9/h+qrdslsUMIZgq+qHfUgKGgu1fxkN0/P
+pUTEvnK0jHS0bKf68r10OEMr3q/53NjgnZ/cPcqlY0S/kqJPTIAcuxrDmkoEVU3K
+7iYLHL8CxWTTnn7S05EcS6L1HOUXHA0MUqORH5zwIe0ClG+poEnK6EOMxPQ02nwi
+o8ZmPrgbBYhdurz3vOXcFD2nhqi2WVIhA16L4wTtSyoeo09Q
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
+MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
+ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
+VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
+scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
+xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
+LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
+yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
+rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
+BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
+QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
+Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
+QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
+BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
+A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
+laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
+awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
+LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
+VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
+LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
+UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
+QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
+QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgIBBDANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJLUjEN
+MAsGA1UECgwES0lTQTEuMCwGA1UECwwlS29yZWEgQ2VydGlmaWNhdGlvbiBBdXRo
+b3JpdHkgQ2VudHJhbDEWMBQGA1UEAwwNS0lTQSBSb290Q0EgMTAeFw0wNTA4MjQw
+ODA1NDZaFw0yNTA4MjQwODA1NDZaMGQxCzAJBgNVBAYTAktSMQ0wCwYDVQQKDARL
+SVNBMS4wLAYDVQQLDCVLb3JlYSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBDZW50
+cmFsMRYwFAYDVQQDDA1LSVNBIFJvb3RDQSAxMIIBIDANBgkqhkiG9w0BAQEFAAOC
+AQ0AMIIBCAKCAQEAvATk+hM58DSWIGtsaLv623f/J/es7C/n/fB/bW+MKs0lCVsk
+9KFo/CjsySXirO3eyDOE9bClCTqnsUdIxcxPjHmc+QZXfd3uOPbPFLKc6tPAXXdi
+8EcNuRpAU1xkcK8IWsD3z3X5bI1kKB4g/rcbGdNaZoNy4rCbvdMlFQ0yb2Q3lIVG
+yHK+d9VuHygvx2nt54OJM1jT3qC/QOhDUO7cTWu8peqmyGGO9cNkrwYV3CmLP3WM
+vHFE2/yttRcdbYmDz8Yzvb9Fov4Kn6MRXw+5H5wawkbMnChmn3AmPC7fqoD+jMUE
+CSVPzZNHPDfqAmeS/vwiJFys0izgXAEzisEZ2wIBA6MyMDAwHQYDVR0OBBYEFL+2
+J9gDWnZlTGEBQVYx5Yt7OtnMMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADggEBABOvUQveimpb5poKyLGQSk6hAp3MiNKrZr097LuxQpVqslxa/6FjZJap
+aBV/JV6K+KRzwYCKhQoOUugy50X4TmWAkZl0Q+VFnUkq8JSV3enhMNITbslOsXfl
+BM+tWh6UCVrXPAgcrnrpFDLBRa3SJkhyrKhB2vAhhzle3/xk/2F0KpzZm4tfwjeT
+2KM3LzuTa7IbB6d/CVDv0zq+IWuKkDsnSlFOa56ch534eJAx7REnxqhZvvwYC/uO
+fi5C4e3nCSG9uRPFVmf0JqZCQ5BEVLRxm3bkGhKsGigA35vB1fjbXKP4krG9tNT5
+UNkAAk/bg9ART6RCVmE6fhMy04Qfybo=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
+CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
+OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
+Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
+kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
+fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
+N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
+xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
+Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
+SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
+mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
+ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
+HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
+MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
+MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
+dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
+UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
+ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
+c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
+OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
+mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
+BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
+qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
+gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
+bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
+dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
+6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
+h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
+/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
+wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
+pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB
+ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly
+aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl
+ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w
+NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G
+A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD
+VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX
+SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR
+VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2
+w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF
+mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg
+4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9
+4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw
+EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx
+SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2
+ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8
+vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa
+hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi
+Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ
+/L7fCg0=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
+MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
+Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
+YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
+CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
+b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
+bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
+HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
+WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
+1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
+u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
+99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
+BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
+cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
+gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
+ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
+aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
+Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
+MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
+rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
+68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
+4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
+UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
+abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
+3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
+KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
+hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
+Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
+zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
+ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
+cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
+qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
+YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
+b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
+8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
+NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
+ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
+q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
+nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
+GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
+Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
+WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
+rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
+ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
+Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
+yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
+A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
+MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
+BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
+g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
+fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
+WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
+B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
+hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
+TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
+mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
+ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
+4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
+8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
+MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
+/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
+FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
+U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
+FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
+A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
+eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
+sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
+VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
+KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
+FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
+oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
+0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
+3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
+8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
+DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
+PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
+ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
+V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
+4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
+H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
+8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
+vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
+mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
+T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
+WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
+4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
+VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
+CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
+aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
+dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
+czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
+A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
+Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
+d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
+t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
+k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
+Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
+mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
+4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz
+MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw
+IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR
+dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp
+li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D
+rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ
+WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug
+F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU
+xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC
+Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv
+dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw
+ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl
+IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh
+c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy
+ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh
+Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI
+KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T
+KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq
+y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p
+dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD
+VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL
+MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk
+fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8
+7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R
+cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y
+mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW
+xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK
+SnQ2+Q==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMh
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIz
+MloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09N
+IFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNlY3VyaXR5IENvbW11
+bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSE
+RMqm4miO/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gO
+zXppFodEtZDkBp2uoQSXWHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5
+bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4zZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDF
+MxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4bepJz11sS6/vmsJWXMY1
+VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK9U2vP9eC
+OKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G
+CSqGSIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HW
+tWS3irO4G8za+6xmiEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZ
+q51ihPZRwSzJIxXYKLerJRO1RuGGAv8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDb
+EJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnWmHyojf6GPgcWkuF75x3sM3Z+
+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEWT1MKZPlO9L9O
+VL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
+MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
+dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
+WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
+VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
+9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
+DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
+Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
+QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
+xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
+A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
+Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
+Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
+JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
+RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP
+MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx
+MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV
+BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o
+Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt
+5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s
+3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej
+vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu
+8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw
+DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG
+MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil
+zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/
+3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD
+FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6
+Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2
+ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y
+MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg
+TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS
+b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS
+M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC
+UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d
+Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p
+rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l
+pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb
+j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC
+KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS
+/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X
+cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH
+1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP
+px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7
+MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
+eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u
+2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS
+v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC
+wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy
+CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e
+vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6
+Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa
+Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL
+eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8
+FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc
+7uzXLg==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
+DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
+qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
+uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
+Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
+pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
+5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
+UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
+GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
+5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
+6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
+eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
+B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
+BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov
+L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG
+SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS
+CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen
+5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897
+IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK
+gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL
++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL
+vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm
+bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk
+N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC
+Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z
+ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX
+DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP
+cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW
+IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX
+xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy
+KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR
+9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az
+5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8
+6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7
+Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP
+bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt
+BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt
+XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF
+MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd
+INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD
+U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp
+LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8
+Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp
+gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh
+/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw
+0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A
+fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq
+4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR
+1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/
+QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM
+94B7IWcnMFk=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1
+OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG
+A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ
+JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD
+vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo
+D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/
+Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW
+RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK
+HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN
+nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM
+0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i
+UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9
+Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg
+TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE
+AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL
+BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K
+2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX
+UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl
+6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK
+9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ
+HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI
+wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY
+XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l
+IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo
+hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr
+so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
+FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
+ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
+LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
+BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
+Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
+dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
+cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
+YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
+dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
+bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
+YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
+TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
+9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
+jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
+FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
+ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
+ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
+EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
+L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
+yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
+O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
+um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
+NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul
+F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC
+ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w
+ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk
+aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0
+YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg
+c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93
+d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG
+CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1
+dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF
+wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS
+Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst
+0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc
+pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl
+CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF
+P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK
+1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm
+KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE
+JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ
+8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm
+fyWl8kgAwKQB2j8=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBk
+MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0
+YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg
+Q0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4MTgyMjA2MjBaMGQxCzAJBgNVBAYT
+AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp
+Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIICIjAN
+BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9
+m2BtRsiMMW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdih
+FvkcxC7mlSpnzNApbjyFNDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/
+TilftKaNXXsLmREDA/7n29uj/x2lzZAeAR81sH8A25Bvxn570e56eqeqDFdvpG3F
+EzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkCb6dJtDZd0KTeByy2dbco
+kdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn7uHbHaBu
+HYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNF
+vJbNcA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo
+19AOeCMgkckkKmUpWyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjC
+L3UcPX7ape8eYIVpQtPM+GP+HkM5haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJW
+bjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNYMUJDLXT5xp6mig/p/r+D5kNX
+JLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw
+FDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j
+BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzc
+K6FptWfUjNP9MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzf
+ky9NfEBWMXrrpA9gzXrzvsMnjgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7Ik
+Vh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQMbFamIp1TpBcahQq4FJHgmDmHtqB
+sfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4HVtA4oJVwIHaM190e
+3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtlvrsR
+ls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ip
+mXeascClOS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HH
+b6D0jqTsNFFbjCYDcKF31QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksf
+rK/7DZBaZmBwXarNeNQk7shBoJMBkpxqnvy5JMWzFYJ+vq6VK+uxwNrjAWALXmms
+hFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCyx/yP2FS1k2Kdzs9Z+z0Y
+zirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMWNY6E0F/6
+MBr1mmz0DlP5OlvRHA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBk
+MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0
+YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg
+Q0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2MjUwNzM4MTRaMGQxCzAJBgNVBAYT
+AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp
+Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIICIjAN
+BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvEr
+jw0DzpPMLgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r
+0rk0X2s682Q2zsKwzxNoysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f
+2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJwDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVP
+ACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpHWrumnf2U5NGKpV+GY3aF
+y6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1aSgJA/MTA
+tukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL
+6yxSNLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0
+uPoTXGiTOmekl9AbmbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrAL
+acywlKinh/LTSlDcX3KwFnUey7QYYpqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velh
+k6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3qPyZ7iVNTA6z00yPhOgpD/0Q
+VAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw
+FDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O
+BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqh
+b97iEoHF8TwuMA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4R
+fbgZPnm3qKhyN2abGu2sEzsOv2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv
+/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ82YqZh6NM4OKb3xuqFp1mrjX2lhI
+REeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLzo9v/tdhZsnPdTSpx
+srpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcsa0vv
+aGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciAT
+woCqISxxOQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99n
+Bjx8Oto0QuFmtEYE3saWmA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5W
+t6NlUe07qxS/TFED6F+KBZvuim6c779o+sjaC+NCydAXFJy3SuCvkychVSa1ZC+N
+8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TCrvJcwhbtkj6EPnNgiLx2
+9CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX5OfNeOI5
+wSsSnqaeG8XmDtkx2Q==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAw
+ZzELMAkGA1UEBhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdp
+dGFsIENlcnRpZmljYXRlIFNlcnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290
+IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcNMzEwNjI1MDg0NTA4WjBnMQswCQYD
+VQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2Vy
+dGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYgQ0Eg
+MjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7Bx
+UglgRCgzo3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD
+1ycfMQ4jFrclyxy0uYAyXhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPH
+oCE2G3pXKSinLr9xJZDzRINpUKTk4RtiGZQJo/PDvO/0vezbE53PnUgJUmfANykR
+HvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8LiqG12W0OfvrSdsyaGOx9/
+5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaHZa0zKcQv
+idm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHL
+OdAGalNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaC
+NYGu+HuB5ur+rPQam3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f
+46Fq9mDU5zXNysRojddxyNMkM3OxbPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCB
+UWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDixzgHcgplwLa7JSnaFp6LNYth
+7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/BAQDAgGGMB0G
+A1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED
+MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWB
+bj2ITY1x0kbBbkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6x
+XCX5145v9Ydkn+0UjrgEjihLj6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98T
+PLr+flaYC/NUn81ETm484T4VvwYmneTwkLbUwp4wLh/vx3rEUMfqe9pQy3omywC0
+Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7XwgiG/W9mR4U9s70
+WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH59yL
+Gn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm
+7JFe3VE/23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4S
+nr8PyQUQ3nqjsTzyP6WqJ3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VN
+vBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyAHmBR3NdUIR7KYndP+tiPsys6DXhyyWhB
+WkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/giuMod89a2GQ+fYWVq6nTI
+fI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuWl8PVP3wb
+I+2ksx0WckNLIOFZfsLorSa/ovc=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
+biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
+d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
+76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
+bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
+6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
+emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFejCCA2KgAwIBAgIJAN7E8kTzHab8MA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJDAiBgNVBAMTG1N3aXNzU2ln
+biBHb2xkIFJvb3QgQ0EgLSBHMzAeFw0wOTA4MDQxMzMxNDdaFw0zNzA4MDQxMzMx
+NDdaMEoxCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJDAiBgNV
+BAMTG1N3aXNzU2lnbiBHb2xkIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAMPon8hlWp1nG8FFl7S0h0NbYWCAnvJ/XvlnRN1E+qu1
+q3f/KhlMzm/Ej0Gf4OLNcuDR1FJhQQkKvwpw++CDaWEpytsimlul5t0XlbBvhI46
+PmRaQfsbWPz9Kz6ypOasyYK8zvaV+Jd37Sb2WK6eJ+IPg+zFNljIe8/Vh6GphxoT
+Z2EBbaZpnOKQ8StoZfPosHz8gj3erdgKAAlEeROc8P5udXvCvLNZAQt8xdUt8L//
+bVfSSYHrtLNQrFv5CxUVjGn/ozkB7fzc3CeXjnuL1Wqm1uAdX80Bkeb1Ipi6LgkY
+OG8TqIHS+yE35y20YueBkLDGeVm3Z3X+vo87+jbsr63ST3Q2AeVXqyMEzEpel89+
+xu+MzJUjaY3LOMcZ9taKABQeND1v2gwLw7qX/BFLUmE+vzNnUxC/eBsJwke6Hq9Y
+9XWBf71W8etW19lpDAfpNzGwEhwy71bZvnorfL3TPbxqM006PFAQhyfHegpnU9t/
+gJvoniP6+Qg6i6GONFpIM19k05eGBxl9iJTOKnzFat+vvKmfzTqmurtU+X+P388O
+WsStmryzOndzg0yTPJBotXxQlRHIgl6UcdBBGPvJxmXszom2ziKzEVs/4J0+Gxho
+DaoDoWdZv2udvPjyZS+aQTpF2F7QNmxvOx5jtI6YTBPbIQ6fe+3qoKpxw+ujoNIl
+AgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBRclwZGNKvfMMV8xQ1VcWYwtWCPnjAfBgNVHSMEGDAWgBRclwZGNKvfMMV8
+xQ1VcWYwtWCPnjANBgkqhkiG9w0BAQsFAAOCAgEAd0tN3uqFSqssJ9ZFx/FfIMFb
+YO0Hy6Iz3DbPx5TxBsfV2s/NrYQ+/xJIf0HopWZXMMQd5KcaLy1Cwe9Gc7LV9Vr9
+Dnpr0sgxow1IlldlY1UYwPzkisyYhlurDIonN/ojaFlcJtehwcK5Tiz/KV7mlAu+
+zXJPleiP9ve4Pl7Oz54RyawDKUiKqbamNLmsQP/EtnM3scd/qVHbSypHX0AkB4gG
+tySz+3/3sIsz+r8jdaNc/qplGsK+8X2BdwOBsY3XlQ16PEKYt4+pfVDh31IGmqBS
+VHiDB2FSCTdeipynxlHRXGPRhNzC29L6Wxg2fWa81CiXL3WWHIQHrIuOUxG+JCGq
+Z/LBrYic07B4Z3j101gDIApdIPG152XMDiDj1d/mLxkrhWjBBCbPj+0FU6HdBw7r
+QSbHtKksW+NpPWbAYhvAqobAN8MxBIZwOb5rXyFAQaB/5dkPOEtwX0n4hbgrLqof
+k0FD+PuydDwfS1dbt9RRoZJKzr4Qou7YFCJ7uUG9jemIqdGPAxpg/z+HiaCZJyJm
+sD5onnKIUTidEz5FbQXlRrVz7UOGsRQKHrzaDb8eJFxmjw6+of3G62m8Q3nXA3b5
+3IeZuJjEzX9tEPkQvixC/pwpTYNrCr21jsRIiv0hB6aAfR+b6au9gmFECnEnX22b
+kJ6u/zYks2gD1pWMa3M=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFwTCCA6mgAwIBAgIITrIAZwwDXU8wDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEjMCEGA1UEAxMaU3dpc3NTaWdu
+IFBsYXRpbnVtIENBIC0gRzIwHhcNMDYxMDI1MDgzNjAwWhcNMzYxMDI1MDgzNjAw
+WjBJMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMSMwIQYDVQQD
+ExpTd2lzc1NpZ24gUGxhdGludW0gQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQAD
+ggIPADCCAgoCggIBAMrfogLi2vj8Bxax3mCq3pZcZB/HL37PZ/pEQtZ2Y5Wu669y
+IIpFR4ZieIbWIDkm9K6j/SPnpZy1IiEZtzeTIsBQnIJ71NUERFzLtMKfkr4k2Htn
+IuJpX+UFeNSH2XFwMyVTtIc7KZAoNppVRDBopIOXfw0enHb/FZ1glwCNioUD7IC+
+6ixuEFGSzH7VozPY1kneWCqv9hbrS3uQMpe5up1Y8fhXSQQeol0GcN1x2/ndi5ob
+jM89o03Oy3z2u5yg+gnOI2Ky6Q0f4nIoj5+saCB9bzuohTEJfwvH6GXp43gOCWcw
+izSC+13gzJ2BbWLuCB4ELE6b7P6pT1/9aXjvCR+htL/68++QHkwFix7qepF6w9fl
++zC8bBsQWJj3Gl/QKTIDE0ZNYWqFTFJ0LwYfexHihJfGmfNtf9dng34TaNhxKFrY
+zt3oEBSa/m0jh26OWnA81Y0JAKeqvLAxN23IhBQeW71FYyBrS3SMvds6DsHPWhaP
+pZjydomyExI7C3d3rLvlPClKknLKYRorXkzig3R3+jVIeoVNjZpTxN94ypeRSCtF
+KwH3HBqi7Ri6Cr2D+m+8jVeTO9TUps4e8aCxzqv9KyiaTxvXw3LbpMS/XUz13XuW
+ae5ogObnmLo2t/5u7Su9IPhlGdpVCX4l3P5hYnL5fhgC72O00Puv5TtjjGePAgMB
+AAGjgawwgakwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFFCvzAeHFUdvOMW0ZdHelarp35zMMB8GA1UdIwQYMBaAFFCvzAeHFUdvOMW0
+ZdHelarp35zMMEYGA1UdIAQ/MD0wOwYJYIV0AVkBAQEBMC4wLAYIKwYBBQUHAgEW
+IGh0dHA6Ly9yZXBvc2l0b3J5LnN3aXNzc2lnbi5jb20vMA0GCSqGSIb3DQEBBQUA
+A4ICAQAIhab1Fgz8RBrBY+D5VUYI/HAcQiiWjrfFwUF1TglxeeVtlspLpYhg0DB0
+uMoI3LQwnkAHFmtllXcBrqS3NQuB2nEVqXQXOHtYyvkv+8Bldo1bAbl93oI9ZLi+
+FHSjClTTLJUYFzX1UWs/j6KWYTl4a0vlpqD4U99REJNi54Av4tHgvI42Rncz7Lj7
+jposiU0xEQ8mngS7twSNC/K5/FqdOxa3L8iYq/6KUFkuozv8KV2LwUvJ4ooTHbG/
+u0IdUt1O2BReEMYxB+9xJ/cbOQncguqLs5WGXv312l0xpuAxtpTmREl0xRbl9x8D
+YSjFyMsSoEJL+WuICI20MhjzdZ/EfwBPBZWcoxcCw7NTm6ogOSkrZvqdr16zktK1
+puEa+S1BaYEUtLS17Yk9zvupnTVCRLEcFHOBzyoBNZox1S2PbYTfgE1X4z/FhHXa
+icYwu+uPyyIIoK6q8QNsOktNCaUOcsZWayFCTiMlFGiudgp8DAdwZPmaL/YFOSbG
+DI8Zf0NebvRbFS/bYV3mZy8/CJT5YLSYMdp08YSTcU1f+2BY0fvEwW2JorsgH51x
+kcsymxM9Pn2SUjWskpSi0xjCfMfqr3YFFt1nJ8J+HAciIfNAChs0B0QTwoRqjt8Z
+Wr9/6x3iGjjRXK9HkmuAtTClyY3YqzGBH9/CZjfTk6mFhnll0g==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFgTCCA2mgAwIBAgIIIj+pFyDegZQwDQYJKoZIhvcNAQELBQAwTjELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEoMCYGA1UEAxMfU3dpc3NTaWdu
+IFBsYXRpbnVtIFJvb3QgQ0EgLSBHMzAeFw0wOTA4MDQxMzM0MDRaFw0zNzA4MDQx
+MzM0MDRaME4xCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxKDAm
+BgNVBAMTH1N3aXNzU2lnbiBQbGF0aW51bSBSb290IENBIC0gRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCUoO8TG59EIBvNxaoiu9nyUj56Wlh35o2h
+K8ncpPPksxOUAGKbHPJDUEOBfq8wNkmsGIkMGEW4PsdUbePYmllriholqba1Dbd9
+I/BffagHqfc+hi7IAU3c5jbtHeU3B2kSS+OD0QQcJPAfcHHnGe1zSG6VKxW2VuYC
+31bpm/rqpu7gwsO64MzGyHvXbzqVmzqPvlss0qmgOD7WiOGxYhOO3KswZ82oaqZj
+K4Kwy8c9Tu1y9n2rMk5lAusPmXT4HBoojA5FAJMsFJ9txxue9orce3jjtJRHHU0F
+bYR6kFSynot1woDfhzk/n/tIVAeNoCn1+WBfWnLou5ugQuAIADSjFTwT49YaawKy
+lCGjnUG8KmtOMzumlDj8PccrM7MuKwZ0rJsQb8VORfddoVYDLA1fer0e3h13kGva
+pS2KTOnfQfTnS+x9lUKfTKkJD0OIPz2T5yv0ekjaaMTdEoAxGl0kVCamJCGzTK3a
+Fwg2AlfGnIZwyXXJnnxh2HjmuegUafkcECgSXUt1ULo80GdwVVVWS/s9HNjbeU2X
+37ie2xcs1TUHuFCp9473Vv96Z0NPINnKZtY4YEvulDHWDaJIm/80aZTGNfWWiO+q
+ZsyBputMU/8ydKe2nZhXtLomqfEzM2J+OrADEVf/3G8RI60+xgrQzFS3LcKTHeXC
+pozH2O9T9wIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUVio/kFj0F1oUstcIG4VbVGpUGigwHwYDVR0jBBgwFoAUVio/
+kFj0F1oUstcIG4VbVGpUGigwDQYJKoZIhvcNAQELBQADggIBAGztiudDqHknm7jP
+hz5kOBiMEUKShjfgWMMb7gQu94TsgxBoDH94LZzCl442ThbYDuprSK1Pnl0NzA2p
+PhiFfsxomTk11tifhsEy+01lsyIUS8iFZtoX/3GRrJxWV95xLFZCv/jNDvCi0//S
+IhX70HgKfuGwWs6ON9upnueVz2PyLA3S+m/zyNX7ALf3NWcQ03tS7BAy+L/dXsmm
+gqTxsL8dLt0l5L1N8DWpkQFH+BAClFvrPusNutUdYyylLqvn4x6j7kuqX7FmAbSC
+WvlGS8fx+N8svv113ZY4mjc6bqXmMhVus5DAOYp0pZWgvg0uiXnNKVaOw15XUcQF
+bwRVj4HpTL1ZRssqvE3JHfLGTwXkyAQN925P2sM6nNLC9enGJHoUPhxCMKgCRTGp
+/FCp3NyGOA9bkz9/CE5qDSc6EHlWwxW4PgaG9tlwZ691eoviWMzGdU8yVcVsFAko
+O/KV5GreLCgHraB9Byjd1Fqj6aZ8E4yZC1J429nR3z5aQ3Z/RmBTws3ndkd8Vc20
+OWQQW5VLNV1EgyTV4C4kDMGAbmkAgAZ3CmaCEAxRbzeJV9vzTOW4ue4jZpdgt1Ld
+2Zb7uoo7oE3OXvBETJDMIU8bOphrjjGD+YMIUssZwTVr7qEVW4g/bazyNJJTpjAq
+E9fmhqhd2ULSx52peovL3+6iMcLl
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
+IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
+RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
+U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
+YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
+nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
+6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
+c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
+MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
+jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
+5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFfjCCA2agAwIBAgIJAKqIsFoLsXabMA0GCSqGSIb3DQEBCwUAMEwxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJjAkBgNVBAMTHVN3aXNzU2ln
+biBTaWx2ZXIgUm9vdCBDQSAtIEczMB4XDTA5MDgwNDEzMTkxNFoXDTM3MDgwNDEz
+MTkxNFowTDELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEmMCQG
+A1UEAxMdU3dpc3NTaWduIFNpbHZlciBSb290IENBIC0gRzMwggIiMA0GCSqGSIb3
+DQEBAQUAA4ICDwAwggIKAoICAQC+h5sF5nF8Um9t7Dep6bPczF9/01DqIZsE8D2/
+vo7JpRQWMhDPmfzscK1INmckDBcy1inlSjmxN+umeAxsbxnKTvdR2hro+iE4bJWc
+L9aLzDsCm78mmxFFtrg0Wh2mVEhSyJ14cc5ISsyneIPcaKtmHncH0zYYCNfUbWD4
+8HnTMzYJkmO3BJr1p5baRa90GvyC46hbDjo/UleYfrycjMHAslrfxH7+DKZUdoN+
+ut3nKvRKNk+HZS6lujmNWWEp89OOJHCMU5sRpUcHsnUFXA2E2UTZzckmRFduAn2V
+AdSrJIbuPXD7V/qwKRTQnfLFl8sJyvHyPefYS5bpiC+eR1GKVGWYSNIS5FR3DAfm
+vluc8d0Dfo2E/L7JYtX8yTroibVfwgVSYfCcPuwuTYxykY7IQ8GiKF71gCTc4i+H
+O1MA5cvwsnyNeRmgiM14+MWKWnflBqzdSt7mcG6+r771sasOCLDboD+Uxb4Subx7
+J3m1MildrsUgI5IDe1Q5sIkiVG0S48N46jpA/aSTrOktiDzbpkdmTN/YF+0W3hrW
+10Fmvx2A8aTgZBEpXgwnBWLr5cQEYtHEnwxqVdZYOJxmD537q1SAmZzsSdaCn9pF
+1j9TBgO3/R/shn104KS06DK2qgcj+O8kQZ5jMHj0VN2O8Fo4jhJ/eMdvAlYhM864
+uK1pVQIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd
+BgNVHQ4EFgQUoYxFkwoSYwunV18ySn3hIee3PmYwHwYDVR0jBBgwFoAUoYxFkwoS
+YwunV18ySn3hIee3PmYwDQYJKoZIhvcNAQELBQADggIBAIeuYW1IOCrGHNxKLoR4
+ScAjKkW4NU3RBfq5BTPEZL3brVQWKrA+DVoo2qYagHMMxEFvr7g0tnfUW44dC4tG
+kES1s+5JGInBSzSzhzV0op5FZ+1FcWa2uaElc9fCrIj70h2na9rAWubYWWQ0l2Ug
+MTMDT86tCZ6u6cI+GHW0MyUSuwXsULpxQOK93ohGBSGEi6MrHuswMIm/EfVcRPiR
+i0tZRQswDcoMT29jvgT+we3gh/7IzVa/5dyOetTWKU6A26ubP45lByL3RM2WHy3H
+9Qm2mHD/ONxQFRGEO3+p8NgkVMgXjCsTSdaZf0XRD46/aXI3Uwf05q79Wz55uQbN
+uIF4tE2g0DW65K7/00m8Ne1jxrP846thWgW2C+T/qSq+31ROwktcaNqjMqLJTVcY
+UzRZPGaZ1zwCeKdMcdC/2/HEPOcB5gTyRPZIJjAzybEBGesC8cwh+joCMBedyF+A
+P90lrAKb4xfevcqSFNJSgVPm6vwwZzKpYvaTFxUHMV4PG2n19Km3fC2z7YREMkco
+BzuGaUWpxzaWkHJ02BKmcyPRTrm2ejrEKaFQBhG52fQmbmIIEiAW8AFXF9QFNmeX
+61H5/zMkDAUPVr/vPRxSjoreaQ9aH/DVAzFEs5LG6nWorrvHYAOImP/HBIRSkIbh
+tJOpUC/o69I2rDBgp9ADE7UK
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICqDCCAi2gAwIBAgIQIW4zpcvTiKRvKQe0JzzE2DAKBggqhkjOPQQDAzCBlDEL
+MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD
+VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD
+bGFzcyAxIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g
+RzQwHhcNMTExMDA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBlDELMAkGA1UEBhMC
+VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h
+bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAxIFB1
+YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAATXZrUb266zYO5G6ohjdTsqlG3zXxL24w+etgoUU0hS
+yNw6s8tIICYSTvqJhNTfkeQpfSgB2dsYQ2mhH7XThhbcx39nI9/fMTGDAzVwsUu3
+yBe7UcvclBfb6gk7dhLeqrWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRlwI0l9Qy6l3eQP54u4Fr1ztXh5DAKBggqhkjOPQQD
+AwNpADBmAjEApa7jRlP4mDbjIvouKEkN7jB+M/PsP3FezFWJeJmssv3cHFwzjim5
+axfIEWi13IMHAjEAnMhE2mnCNsNUGRCFAtqdR+9B52wmnQk9922Q0QVEL7C8g5No
+8gxFSTm/mQQc0xCg
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIID9jCCAt6gAwIBAgIQJDJ18h0v0gkz97RqytDzmDANBgkqhkiG9w0BAQsFADCB
+lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w
+HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl
+YyBDbGFzcyAxIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzYwHhcNMTExMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE
+BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT
+eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAx
+IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHOddJZKmZgiJM6kXZBxbje/SD
+6Jlz+muxNuCad6BAwoGNAcfMjL2Pffd543pMA03Z+/2HOCgs3ZqLVAjbZ/sbjP4o
+ki++t7JIp4Gh2F6Iw8w5QEFa0dzl2hCfL9oBTf0uRnz5LicKaTfukaMbasxEvxvH
+w9QRslBglwm9LiL1QYRmn81ApqkAgMEflZKf3vNI79sdd2H8f9/ulqRy0LY+/3gn
+r8uSFWkI22MQ4uaXrG7crPaizh5HmbmJtxLmodTNWRFnw2+F2EJOKL5ZVVkElauP
+N4C/DfD8HzpkMViBeNfiNfYgPym4jxZuPkjctUwH4fIa6n4KedaovetdhitNAgMB
+AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBQzQejIORIVk0jyljIuWvXalF9TYDANBgkqhkiG9w0BAQsFAAOCAQEAFeNzV7EX
+tl9JaUSm9l56Z6zS3nVJq/4lVcc6yUQVEG6/MWvL2QeTfxyFYwDjMhLgzMv7OWyP
+4lPiPEAz2aSMR+atWPuJr+PehilWNCxFuBL6RIluLRQlKCQBZdbqUqwFblYSCT3Q
+dPTXvQbKqDqNVkL6jXI+dPEDct+HG14OelWWLDi3mIXNTTNEyZSPWjEwN0ujOhKz
+5zbRIWhLLTjmU64cJVYIVgNnhJ3Gw84kYsdMNs+wBkS39V8C3dlU6S+QTnrIToNA
+DJqXPDe/v+z28LSFdyjBC8hnghAXOKK3Buqbvzr46SMHv3TgmDgVVXjucgBcGaP0
+0jPg/73RVDkpDw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICqDCCAi2gAwIBAgIQNBdlEkA7t1aALYDLeVWmHjAKBggqhkjOPQQDAzCBlDEL
+MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD
+VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD
+bGFzcyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g
+RzQwHhcNMTExMDA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBlDELMAkGA1UEBhMC
+VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h
+bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAyIFB1
+YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAATR2UqOTA2ESlG6fO/TzPo6mrWnYxM9AeBJPvrBR8mS
+szrX/m+c95o6D/UOCgrDP8jnEhSO1dVtmCyzcTIK6yq99tdqIAtnRZzSsr9TImYJ
+XdsR8/EFM1ij4rjPfM2Cm72jQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBQ9MvM6qQyQhPmijGkGYVQvh3L+BTAKBggqhkjOPQQD
+AwNpADBmAjEAyKapr0F/tckRQhZoaUxcuCcYtpjxwH+QbYfTjEYX8D5P/OqwCMR6
+S7wIL8fip29lAjEA1lnehs5fDspU1cbQFQ78i5Ry1I4AWFPPfrFLDeVQhuuea9//
+KabYR9mglhjb8kWz
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIID9jCCAt6gAwIBAgIQZIKe/DcedF38l/+XyLH/QTANBgkqhkiG9w0BAQsFADCB
+lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w
+HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl
+YyBDbGFzcyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzYwHhcNMTExMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE
+BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT
+eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAy
+IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNzOkFyGOFyz9AYxe9GPo15gRn
+V2WYKaRPyVyPDzTS+NqoE2KquB5QZ3iwFkygOakVeq7t0qLA8JA3KRgmXOgNPLZs
+ST/B4NzZS7YUGQum05bh1gnjGSYc+R9lS/kaQxwAg9bQqkmi1NvmYji6UBRDbfkx
++FYW2TgCkc/rbN27OU6Z4TBnRfHU8I3D3/7yOAchfQBeVkSz5GC9kSucq1sEcg+y
+KNlyqwUgQiWpWwNqIBDMMfAr2jUs0Pual07wgksr2F82owstr2MNHSV/oW5cYqGN
+KD6h/Bwg+AEvulWaEbAZ0shQeWsOagXXqgQ2sqPy4V93p3ec5R7c6d9qwWVdAgMB
+AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBSHjCCVyJhK0daABkqQNETfHE2/sDANBgkqhkiG9w0BAQsFAAOCAQEAgY6ypWaW
+tyGltu9vI1pf24HFQqV4wWn99DzX+VxrcHIa/FqXTQCAiIiCisNxDY7FiZss7Y0L
+0nJU9X3UXENX6fOupQIR9nYrgVfdfdp0MP1UR/bgFm6mtApI5ud1Bw8pGTnOefS2
+bMVfmdUfS/rfbSw8DVSAcPCIC4DPxmiiuB1w2XaM/O6lyc+tHc+ZJVdaYkXLFmu9
+Sc2lo4xpeSWuuExsi0BmSxY/zwIa3eFsawdhanYVKZl/G92IgMG/tY9zxaaWI4Sm
+KIYkM2oBLldzJbZev4/mHWGoQClnHYebHX+bn5nNMdZUvmK7OaxoEkiRIKXLsd3+
+b/xa5IJVWa8xqQ==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICpzCCAi2gAwIBAgIQTHm1miicdjFk9YlE0JEC3jAKBggqhkjOPQQDAzCBlDEL
+MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD
+VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD
+bGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g
+RzQwHhcNMTIxMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UEBhMC
+VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h
+bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAzIFB1
+YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAARXz+qzOU0/oSHgbi84csaHl/OFC0fnD1HI0fSZm8pZ
+Zf9M+eoLtyXV0vbsMS0yYhLXdoan+jjJZdT+c+KEOfhMSWIT3brViKBfPchPsD+P
+oVAR5JNGrcNfy/GkapVW6MCjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBQknbzScfcdwiW+IvGJpSwVOzQeXjAKBggqhkjOPQQD
+AwNoADBlAjEAuWZoZdsF0Dh9DvPIdWG40CjEsUozUVj78jwQyK5HeHbKZiQXhj5Q
+Vm6lLZmIuL0kAjAD6qfnqDzqnWLGX1TamPR3vU+PGJyRXEdrQE0QHbPhicoLIsga
+xcX+i93B3294n5E=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF9jCCA96gAwIBAgIQZWNxhdNvRcaPfzH5CYeSgjANBgkqhkiG9w0BAQwFADCB
+lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w
+HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl
+YyBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzYwHhcNMTIxMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE
+BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT
+eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAz
+IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC3DrL6TbyachX7d1vb/UMPywv3
+YC6zK34Mu1PyzE5l8xm7/zUd99Opu0Attd141Kb5N+qFBXttt+YTSwZ8+3ZjjyAd
+LTgrBIXy6LDRX01KIclq2JTqHgJQpqqQB6BHIepm+QSg5oPwxPVeluInTWHDs8GM
+IrZmoQDRVin77cF/JMo9+lqUsITDx7pDHP1kDvEo+0dZ8ibhMblE+avd+76+LDfj
+rAsY0/wBovGkCjWCR0yrvYpe3xOF/CDMSFmvr0FvyyPNypOn3dVfyGQ7/wEDoApP
+LW49hL6vyDKyUymQFfewBZoKPPa5BpDJpeFdoDuw/qi2v/WJKFckOiGGceTciotB
+VeweMCRZ0cBZuHivqlp03iWAMJjtMERvIXAc2xJTDtamKGaTLB/MTzwbgcW59nhv
+0DI6CHLbaw5GF4WU87zvvPekXo7p6bVk5bdLRRIsTDe3YEMKTXEGAJQmNXQfu3o5
+XE475rgD4seTi4QsJUlF3X8jlGAfy+nN9quX92Hn+39igcjcCjBcGHzmzu/Hbh6H
+fLPpysh7avRo/IOlDFa0urKNSgrHl5fFiDAVPRAIVBVycmczM/R8t84AJ1NlziTx
+WmTnNi/yLgLCl99y6AIeoPc9tftoYAP6M6nmEm0G4amoXU48/tnnAGWsthlNe4N/
+NEfq4RhtsYsceavnnQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQUOXEIAD7eyIbnkP/k/SEPziQZFvYwDQYJKoZIhvcN
+AQEMBQADggIBAFBriE1gSM5a4yLOZ3yEp80c/ekMA4w2rwqHDmquV64B0Da78v25
+c8FftaiuTKL6ScsHRhY2vePIVzh+OOS/JTNgxtw3nGO7XpgeGrKC8K6mdxGAREeh
+KcXwszrOmPC47NMOgAZ3IzBM/3lkYyJbd5NDS3Wz2ztuO0rd8ciutTeKlYg6EGhw
+OLlbcH7VQ8n8X0/l5ns27vAg7UdXEyYQXhQGDXt2B8LGLRb0rqdsD7yID08sAraj
+1yLmmUc12I2lT4ESOhF9s8wLdfMecKMbA+r6mujmLjY5zJnOOj8Mt674Q5mwk25v
+qtkPajGRu5zTtCj7g0x6c4JQZ9IOrO1gxbJdNZjPh34eWR0kvFa62qRa2MzmvB4Q
+jxuMjvPB27e+1LBbZY8WaPNWxSoZFk0PuGWHbSSDuGLc4EdhGoh7zk5//dzGDVqa
+pPO1TPbdMaboHREhMzAEYX0c4D5PjT+1ixIAWn2poQDUg+twuxj4pNIcgS23CBHI
+Jnu21OUPA0Zy1CVAHr5JXW2T8VyyO3VUaTqg7kwiuqya4gitRWMFSlI1dsQ09V4H
+Mq3cfCbRW4+t5OaqG3Wf61206MCpFXxOSgdy30bJ1JGSdVaw4e43NmUoxRXIK3bM
+bW8Zg/T92hXiQeczeUaDV/nxpbZt07zXU+fucW14qZen7iCcGRVyFT0E
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDcTCCAlmgAwIBAgIVAOYJ/nrqAGiM4CS07SAbH+9StETRMA0GCSqGSIb3DQEB
+BQUAMFAxCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9LcmFqb3dhIEl6YmEgUm96bGlj
+emVuaW93YSBTLkEuMRcwFQYDVQQDDA5TWkFGSVIgUk9PVCBDQTAeFw0xMTEyMDYx
+MTEwNTdaFw0zMTEyMDYxMTEwNTdaMFAxCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
+cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRcwFQYDVQQDDA5TWkFGSVIg
+Uk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKxHL49ZMTml
+6g3wpYwrvQKkvc0Kc6oJ5sxfgmp1qZfluwbv88BdocHSiXlY8NzrVYzuWBp7J/9K
+ULMAoWoTIzOQ6C9TNm4YbA9A1jdX1wYNL5Akylf8W5L/I4BXhT9KnlI6x+a7BVAm
+nr/Ttl+utT/Asms2fRfEsF2vZPMxH4UFqOAhFjxTkmJWf2Cu4nvRQJHcttB+cEAo
+ag/hERt/+tzo4URz6x6r19toYmxx4FjjBkUhWQw1X21re//Hof2+0YgiwYT84zLb
+eqDqCOMOXxvH480yGDkh/QoazWX3U75HQExT/iJlwnu7I1V6HXztKIwCBjsxffbH
+3jOshCJtywcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFFOSo33/gnbwM9TrkmdHYTMbaDsqMA0GCSqGSIb3DQEBBQUA
+A4IBAQA5UFWd5EL/pBviIMm1zD2JLUCpp0mJG7JkwznIOzawhGmFFaxGoxAhQBEg
+haP+E0KR66oAwVC6xe32QUVSHfWqWndzbODzLB8yj7WAR0cDM45ZngSBPBuFE3Wu
+GLJX9g100ETfIX+4YBR/4NR/uvTnpnd9ete7Whl0ZfY94yuu4xQqB5QFv+P7IXXV
+lTOjkjuGXEcyQAjQzbFaT9vIABSbeCXWBbjvOXukJy6WgAiclzGNSYprre8Ryydd
+fmjW9HIGwsIO03EldivvqEYL1Hv1w/Pur+6FUEOaL68PEIUovfgwIB2BAw+vZDuw
+cH0mX548PojGyg434cDjkSXa3mHF
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
+AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
+FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
+1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
+jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
+wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
+WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
+NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
+IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
+g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
+BSeOE6Fuwg==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
+8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
+RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
+hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
+ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
+EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
+A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
+WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
+6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
+91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
+TpPDpFQUWw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
+NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
+b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
+VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
+VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
+7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
+Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
+/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
+81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
+dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
+Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
+sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
+slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
+arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
+dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
+TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
+Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
+OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
+vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
+t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
+HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
+qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
+BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
+NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
+LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
+A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
+W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
+3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
+6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
+Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
+NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
+r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
+DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
+YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
+/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
+LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
+jVaMaA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGHDCCBASgAwIBAgIES45gAzANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJE
+SzESMBAGA1UEChMJVFJVU1QyNDA4MSIwIAYDVQQDExlUUlVTVDI0MDggT0NFUyBQ
+cmltYXJ5IENBMB4XDTEwMDMwMzEyNDEzNFoXDTM3MTIwMzEzMTEzNFowRTELMAkG
+A1UEBhMCREsxEjAQBgNVBAoTCVRSVVNUMjQwODEiMCAGA1UEAxMZVFJVU1QyNDA4
+IE9DRVMgUHJpbWFyeSBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+AJlJodr3U1Fa+v8HnyACHV81/wLevLS0KUk58VIABl6Wfs3LLNoj5soVAZv4LBi5
+gs7E8CZ9w0F2CopW8vzM8i5HLKE4eedPdnaFqHiBZ0q5aaaQArW+qKJx1rT/AaXt
+alMB63/yvJcYlXS2lpexk5H/zDBUXeEQyvfmK+slAySWT6wKxIPDwVapauFY9QaG
++VBhCa5jBstWS7A5gQfEvYqn6csZ3jW472kW6OFNz6ftBcTwufomGJBMkonf4ZLr
+6t0AdRi9jflBPz3MNNRGxyjIuAmFqGocYFA/OODBRjvSHB2DygqQ8k+9tlpvzMRr
+kU7jq3RKL+83G1dJ3/LTjCLz4ryEMIC/OJ/gNZfE0qXddpPtzflIPtUFVffXdbFV
+1t6XZFhJ+wBHQCpJobq/BjqLWUA86upsDbfwnePtmIPRCemeXkY0qabC+2Qmd2Fe
+xyZphwTyMnbqy6FG1tB65dYf3mOqStmLa3RcHn9+2dwNfUkh0tjO2FXD7drWcU0O
+I9DW8oAypiPhm/QCjMU6j6t+0pzqJ/S0tdAo+BeiXK5hwk6aR+sRb608QfBbRAs3
+U/q8jSPByenggac2BtTN6cl+AA1Mfcgl8iXWNFVGegzd/VS9vINClJCe3FNVoUnR
+YCKkj+x0fqxvBLopOkJkmuZw/yhgMxljUi2qYYGn90OzAgMBAAGjggESMIIBDjAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjARBgNVHSAECjAIMAYGBFUd
+IAAwgZcGA1UdHwSBjzCBjDAsoCqgKIYmaHR0cDovL2NybC5vY2VzLnRydXN0MjQw
+OC5jb20vb2Nlcy5jcmwwXKBaoFikVjBUMQswCQYDVQQGEwJESzESMBAGA1UEChMJ
+VFJVU1QyNDA4MSIwIAYDVQQDExlUUlVTVDI0MDggT0NFUyBQcmltYXJ5IENBMQ0w
+CwYDVQQDEwRDUkwxMB8GA1UdIwQYMBaAFPZt+LFIs0FDAduGROUYBbdezAY3MB0G
+A1UdDgQWBBT2bfixSLNBQwHbhkTlGAW3XswGNzANBgkqhkiG9w0BAQsFAAOCAgEA
+VPAQGrT7dIjD3/sIbQW86f9CBPu0c7JKN6oUoRUtKqgJ2KCdcB5ANhCoyznHpu3m
+/dUfVUI5hc31CaPgZyY37hch1q4/c9INcELGZVE/FWfehkH+acpdNr7j8UoRZlkN
+15b/0UUBfGeiiJG/ugo4llfoPrp8bUmXEGggK3wyqIPcJatPtHwlb6ympfC2b/Ld
+v/0IdIOzIOm+A89Q0utx+1cOBq72OHy8gpGb6MfncVFMoL2fjP652Ypgtr8qN9Ka
+/XOazktiIf+2Pzp7hLi92hRc9QMYexrV/nnFSQoWdU8TqULFUoZ3zTEC3F/g2yj+
+FhbrgXHGo5/A4O74X+lpbY2XV47aSuw+DzcPt/EhMj2of7SA55WSgbjPMbmNX0rb
+oenSIte2HRFW5Tr2W+qqkc/StixgkKdyzGLoFx/xeTWdJkZKwyjqge2wJqws2upY
+EiThhC497+/mTiSuXd69eVUwKyqYp9SD2rTtNmF6TCghRM/dNsJOl+osxDVGcwvt
+WIVFF/Onlu5fu1NHXdqNEfzldKDUvCfii3L2iATTZyHwU9CALE+2eIA+PIaLgnM1
+1oCfUnYBkQurTrihvzz9PryCVkLxiqRmBVvUz+D4N5G/wvvKDS6t6cPCS+hqM482
+cbBsn0R9fFLO4El62S9eH1tqOzO20OAOK65yJIsOpSE=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF
+MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL
+ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx
+MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc
+MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+
+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH
+iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj
+vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA
+0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB
+OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/
+BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E
+FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01
+GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW
+zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4
+1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE
+f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F
+jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN
+ZetX2fNXlrtIzYE=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
+EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
+NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
+B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
+10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
+0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
+MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
+zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
+46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
+yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
+laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
+oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
+BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
+qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
+4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
+1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
+LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
+H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
+RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
+15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
+6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
+nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
+wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
+aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
+KwbQBM0=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS
+MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp
+bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw
+VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy
+YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy
+dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2
+ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe
+Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx
+GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls
+aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU
+QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh
+xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0
+aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr
+IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB
+IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h
+gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK
+O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO
+fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw
+lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL
+hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID
+AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP
+NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t
+wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM
+7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh
+gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n
+oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs
+yZyQ2uypQjyttgI=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc
+UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx
+c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS
+S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg
+SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx
+OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry
+b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC
+VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE
+sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F
+ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY
+KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG
++7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG
+HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P
+IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M
+733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk
+Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G
+CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW
+AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I
+aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5
+mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa
+XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ
+qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFkjCCA3qgAwIBAgIBCDANBgkqhkiG9w0BAQUFADA6MQswCQYDVQQGEwJDTjER
+MA8GA1UEChMIVW5pVHJ1c3QxGDAWBgNVBAMTD1VDQSBHbG9iYWwgUm9vdDAeFw0w
+ODAxMDEwMDAwMDBaFw0zNzEyMzEwMDAwMDBaMDoxCzAJBgNVBAYTAkNOMREwDwYD
+VQQKEwhVbmlUcnVzdDEYMBYGA1UEAxMPVUNBIEdsb2JhbCBSb290MIICIjANBgkq
+hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA2rPlBlA/9nP3xDK/RqUlYjOHsGj+p9+I
+A2N9Apb964fJ7uIIu527u+RBj8cwiQ9tJMAEbBSUgU2gDXRm8/CFr/hkGd656YGT
+0CiFmUdCSiw8OCdKzP/5bBnXtfPvm65bNAbXj6ITBpyKhELVs6OQaG2BkO5NhOxM
+cE4t3iQ5zhkAQ5N4+QiGHUPR9HK8BcBn+sBR0smFBySuOR56zUHSNqth6iur8CBV
+mTxtLRwuLnWW2HKX4AzKaXPudSsVCeCObbvaE/9GqOgADKwHLx25urnRoPeZnnRc
+GQVmMc8+KlL+b5/zub35wYH1N9ouTIElXfbZlJrTNYsgKDdfUet9Ysepk9H50DTL
+qScmLCiQkjtVY7cXDlRzq6987DqrcDOsIfsiJrOGrCOp139tywgg8q9A9f9ER3Hd
+J90TKKHqdjn5EKCgTUCkJ7JZFStsLSS3JGN490MYeg9NEePorIdCjedYcaSrbqLA
+l3y74xNLytu7awj5abQEctXDRrl36v+6++nwOgw19o8PrgaEFt2UVdTvyie3AzzF
+HCYq9TyopZWbhvGKiWf4xwxmse1Bv4KmAGg6IjTuHuvlb4l0T2qqaqhXZ1LUIGHB
+zlPL/SR/XybfoQhplqCe/klD4tPq2sTxiDEhbhzhzfN1DiBEFsx9c3Q1RSw7gdQg
+7LYJjD5IskkCAwEAAaOBojCBnzALBgNVHQ8EBAMCAQYwDAYDVR0TBAUwAwEB/zBj
+BgNVHSUEXDBaBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMDBggrBgEFBQcD
+BAYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEFBQcDBwYIKwYBBQUHAwgGCCsGAQUF
+BwMJMB0GA1UdDgQWBBTZw9P4gJJnzF3SOqLXcaK0xDiALTANBgkqhkiG9w0BAQUF
+AAOCAgEA0Ih5ygiq9ws0oE4Jwul+NUiJcIQjL1HDKy9e21NrW3UIKlS6Mg7VxnGF
+sZdJgPaE0PC6t3GUyHlrpsVE6EKirSUtVy/m1jEp+hmJVCl+t35HNmktbjK81HXa
+QnO4TuWDQHOyXd/URHOmYgvbqm4FjMh/Rk85hZCdvBtUKayl1/7lWFZXbSyZoUkh
+1WHGjGHhdSTBAd0tGzbDLxLMC9Z4i3WA6UG5iLHKPKkWxk4V43I29tSgQYWvimVw
+TbVEEFDs7d9t5tnGwBLxSzovc+k8qe4bqi81pZufTcU0hF8mFGmzI7GJchT46U1R
+IgP/SobEHOh7eQrbRyWBfvw0hKxZuFhD5D1DCVR0wtD92e9uWfdyYJl2b/Unp7uD
+pEqB7CmB9HdL4UISVdSGKhK28FWbAS7d9qjjGcPORy/AeGEYWsdl/J1GW1fcfA67
+loMQfFUYCQSu0feLKj6g5lDWMDbX54s4U+xJRODPpN/xU3uLWrb2EZBL1nXz/gLz
+Ka/wI3J9FO2pXd96gZ6bkiL8HvgBRUGXx2sBYb4zaPKgZYRmvOAqpGjTcezHCN6j
+w8k2SjTxF+KAryAhk5Qe5hXTVGLxtTgv48y5ZwSpuuXu+RBuyy5+E6+SFP7zJ3N7
+OPxzbbm5iPZujAv1/P8JDrMtXnt145Ik4ubhWD5LKAN1axibRww=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAmygAwIBAgIBCTANBgkqhkiG9w0BAQUFADAzMQswCQYDVQQGEwJDTjER
+MA8GA1UEChMIVW5pVHJ1c3QxETAPBgNVBAMTCFVDQSBSb290MB4XDTA0MDEwMTAw
+MDAwMFoXDTI5MTIzMTAwMDAwMFowMzELMAkGA1UEBhMCQ04xETAPBgNVBAoTCFVu
+aVRydXN0MREwDwYDVQQDEwhVQ0EgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBALNdB8qGJn1r4vs4CQ7MgsJqGgCiFV/W6dQBt1YDAVmP9ThpJHbC
+XivF9iu/r/tB/Q9a/KvXg3BNMJjRnrJ2u5LWu+kQKGkoNkTo8SzXWHwk1n8COvCB
+a2FgP/Qz3m3l6ihST/ypHWN8C7rqrsRoRuTej8GnsrZYWm0dLNmMOreIy4XU9+gD
+Xv2yTVDo1h//rgI/i0+WITyb1yXJHT/7mLFZ5PCpO6+zzYUs4mBGzG+OoOvwNMXx
+QhhgrhLtRnUc5dipllq+3lrWeGeWW5N3UPJuG96WUUqm1ktDdSFmjXfsAoR2XEQQ
+th1hbOSjIH23jboPkXXHjd+8AmCoKai9PUMCAwEAAaOBojCBnzALBgNVHQ8EBAMC
+AQYwDAYDVR0TBAUwAwEB/zBjBgNVHSUEXDBaBggrBgEFBQcDAQYIKwYBBQUHAwIG
+CCsGAQUFBwMDBggrBgEFBQcDBAYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEFBQcD
+BwYIKwYBBQUHAwgGCCsGAQUFBwMJMB0GA1UdDgQWBBTbHzXza0z/QjFkm827Wh4d
+SBC37jANBgkqhkiG9w0BAQUFAAOCAQEAOGy3iPGt+lg3dNHocN6cJ1nL5BXXoMNg
+14iABMUwTD3UGusGXllH5rxmy+AI/Og17GJ9ysDawXiv5UZv+4mCI4/211NmVaDe
+JRI7cTYWVRJ2+z34VFsxugAG+H1V5ad2g6pcSpemKijfvcZsCyOVjjN/Hl5AHxNU
+LJzltQ7dFyiuawHTUin1Ih+QOfTcYmjwPIZH7LgFRbu3DJaUxmfLI3HQjnQi1kHr
+A6i26r7EARK1s11AdgYg1GS4KUYGis4fk5oQ7vuqWrTcL9Ury/bXBYSYBZELhPc9
++tb5evosFeo2gkO3t7jj83EB7UNDogVFwygFBzXjAaU4HoDU18PZ3g==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
+eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
+JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
+Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
+VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
+I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
+o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
+A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
+zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
+RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
+MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
+BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
+3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
+VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
+c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
+c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
+UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
+Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
+VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
+ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
+8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
+iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
+XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
+VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
+L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
+jjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB
+kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
+IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD
+VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu
+dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6
+E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ
+D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK
+4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq
+lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW
+bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB
+o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT
+MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js
+LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr
+BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB
+AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft
+Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj
+j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH
+KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv
+2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3
+mfnGV/TJVTl4uix5yaaIK/QI
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEojCCA4qgAwIBAgIQRL4Mi1AAJLQR0zYlJWfJiTANBgkqhkiG9w0BAQUFADCB
+rjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xNjA0BgNVBAMTLVVUTi1VU0VSRmlyc3Qt
+Q2xpZW50IEF1dGhlbnRpY2F0aW9uIGFuZCBFbWFpbDAeFw05OTA3MDkxNzI4NTBa
+Fw0xOTA3MDkxNzM2NThaMIGuMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAV
+BgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5l
+dHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRydXN0LmNvbTE2MDQGA1UE
+AxMtVVROLVVTRVJGaXJzdC1DbGllbnQgQXV0aGVudGljYXRpb24gYW5kIEVtYWls
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsjmFpPJ9q0E7YkY3rs3B
+YHW8OWX5ShpHornMSMxqmNVNNRm5pELlzkniii8efNIxB8dOtINknS4p1aJkxIW9
+hVE1eaROaJB7HHqkkqgX8pgV8pPMyaQylbsMTzC9mKALi+VuG6JG+ni8om+rWV6l
+L8/K2m2qL+usobNqqrcuZzWLeeEeaYji5kbNoKXqvgvOdjp6Dpvq/NonWz1zHyLm
+SGHGTPNpsaguG7bUMSAsvIKKjqQOpdeJQ/wWWq8dcdcRWdq6hw2v+vPhwvCkxWeM
+1tZUOt4KpLoDd7NlyP0e03RiqhjKaJMeoYV+9Udly/hNVyh00jT/MLbu9mIwFIws
+6wIDAQABo4G5MIG2MAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBSJgmd9xJ0mcABLtFBIfN49rgRufTBYBgNVHR8EUTBPME2gS6BJhkdodHRw
+Oi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLVVTRVJGaXJzdC1DbGllbnRBdXRoZW50
+aWNhdGlvbmFuZEVtYWlsLmNybDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH
+AwQwDQYJKoZIhvcNAQEFBQADggEBALFtYV2mGn98q0rkMPxTbyUkxsrt4jFcKw7u
+7mFVbwQ+zznexRtJlOTrIEy05p5QLnLZjfWqo7NK2lYcYJeA3IKirUq9iiv/Cwm0
+xtcgBEXkzYABurorbs6q15L+5K/r9CYdFip/bDCVNy8zEqx/3cfREYxRmLLQo5HQ
+rfafnoOTHh1CuEava2bwm3/q4wMC5QJRwarVNZ1yQAOJujEdxRBoUp7fooXFXAim
+eOZTT7Hot9MUnpOmw2TjrH5xzbyf6QMbzPvprDHBr3wVdAKZw7JHpsIyYdfHb0gk
+USeh1YdV8nuPmD0Wnu51tvjQjvLzxq4oW6fw8zYX/MMF08oDSlQ=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB
+lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt
+SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe
+MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v
+d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh
+cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn
+0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ
+M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a
+MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd
+oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI
+DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy
+oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD
+VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0
+dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy
+bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF
+BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM
+//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli
+CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE
+CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t
+3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS
+KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEZjCCA06gAwIBAgIQRL4Mi1AAJLQR0zYt4LNfGzANBgkqhkiG9w0BAQUFADCB
+lTELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHTAbBgNVBAMTFFVUTi1VU0VSRmlyc3Qt
+T2JqZWN0MB4XDTk5MDcwOTE4MzEyMFoXDTE5MDcwOTE4NDAzNlowgZUxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJVVDEXMBUGA1UEBxMOU2FsdCBMYWtlIENpdHkxHjAc
+BgNVBAoTFVRoZSBVU0VSVFJVU1QgTmV0d29yazEhMB8GA1UECxMYaHR0cDovL3d3
+dy51c2VydHJ1c3QuY29tMR0wGwYDVQQDExRVVE4tVVNFUkZpcnN0LU9iamVjdDCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6qgT+jo2F4qjEAVZURnicP
+HxzfOpuCaDDASmEd8S8O+r5596Uj71VRloTN2+O5bj4x2AogZ8f02b+U60cEPgLO
+KqJdhwQJ9jCdGIqXsqoc/EHSoTbL+z2RuufZcDX65OeQw5ujm9M89RKZd7G3CeBo
+5hy485RjiGpq/gt2yb70IuRnuasaXnfBhQfdDWy/7gbHd2pBnqcP1/vulBe3/IW+
+pKvEHDHd17bR5PDv3xaPslKT16HUiaEHLr/hARJCHhrh2JU022R5KP+6LhHC5ehb
+kkj7RwvCbNqtMoNB86XlQXD9ZZBt+vpRxPm9lisZBCzTbafc8H9vg2XiaquHhnUC
+AwEAAaOBrzCBrDALBgNVHQ8EBAMCAcYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQU2u1kdBScFDyr3ZmpvVsoTYs8ydgwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDov
+L2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmlyc3QtT2JqZWN0LmNybDApBgNV
+HSUEIjAgBggrBgEFBQcDAwYIKwYBBQUHAwgGCisGAQQBgjcKAwQwDQYJKoZIhvcN
+AQEFBQADggEBAAgfUrE3RHjb/c652pWWmKpVZIC1WkDdIaXFwfNfLEzIR1pp6ujw
+NTX00CXzyKakh0q9G7FzCL3Uw8q2NbtZhncxzaeAFK4T7/yxSPlrJSUtUbYsbUXB
+mMiKVl0+7kNOPmsnjtA6S4ULX9Ptaqd1y9Fahy85dRNacrACgZ++8A+EVCBibGnU
+4U3GDZlDAQ0Slox4nb9QorFEqmrPF3rPbw/U+CRVX/A0FklmPlBGyWNxODFiuGK5
+81OtbLUrohKqGU8J2l7nk8aOFAj+8DCAGKCGhU3IfdeLA/5u1fedFqySLKAj5ZyR
+Uh+U3xeUc8OzwcFxBSAAeL0TUh2oPs0AH8g=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
+cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
+LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
+aWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
+VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
+aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
+bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
+IENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN2E1Lm0+afY8wR4
+nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/EbRrsC+MO
+8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjV
+ojYJrKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjb
+PG7PoBMAGrgnoeS+Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP2
+6KbqxzcSXKMpHgLZ2x87tNcPVkeBFQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vr
+n5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAq2aN17O6x5q25lXQBfGfMY1a
+qtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/Ny9Sn2WCVhDr4
+wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3
+ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrs
+pSCAaWihT37ha88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4
+E1Z5T21Q6huwtVexN2ZYI/PcD98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJ
+BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVy
+aVNpZ24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24s
+IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNp
+Z24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJBgNV
+BAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNp
+Z24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIElu
+Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24g
+Q2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt
+IEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArwoNwtUs22e5LeWU
+J92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6tW8UvxDO
+JxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUY
+wZF7C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9o
+koqQHgiBVrKtaaNS0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjN
+qWm6o+sdDZykIKbBoMXRRkwXbdKsZj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/E
+Srg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0JhU8wI1NQ0kdvekhktdmnLfe
+xbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf0xwLRtxyID+u
+7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU
+sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RI
+sH/7NiXaldDxJBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTP
+cjnhsUPgKM+351psE2tJs//jGHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
+cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
+LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
+aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
+VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
+aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
+bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
+IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
+N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
+KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
+kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
+CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
+Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
+imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
+2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
+DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
+/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
+F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
+TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
+yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
+ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
+nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
+t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
+SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
+BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
+rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
+NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
+BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
+BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
+aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
+MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
+p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
+5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
+WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
+4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
+hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr
+MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl
+cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv
+bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw
+CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h
+dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l
+cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h
+2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E
+lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV
+ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq
+299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t
+vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL
+dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF
+AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR
+zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3
+LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd
+7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw
+++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt
+398znM/jra6O1I7mT1GvFpLgXPYHDw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIID+TCCAuGgAwIBAgIQW1fXqEywr9nTb0ugMbTW4jANBgkqhkiG9w0BAQUFADB5
+MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl
+cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xKjAoBgNVBAMTIVZpc2EgSW5m
+b3JtYXRpb24gRGVsaXZlcnkgUm9vdCBDQTAeFw0wNTA2MjcxNzQyNDJaFw0yNTA2
+MjkxNzQyNDJaMHkxCzAJBgNVBAYTAlVTMQ0wCwYDVQQKEwRWSVNBMS8wLQYDVQQL
+EyZWaXNhIEludGVybmF0aW9uYWwgU2VydmljZSBBc3NvY2lhdGlvbjEqMCgGA1UE
+AxMhVmlzYSBJbmZvcm1hdGlvbiBEZWxpdmVyeSBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyREA4R/QkkfpLx0cYjga/EhIPZpchH0MZsRZ
+FfP6C2ITtf/Wc+MtgD4yTK0yoiXvni3d+aCtEgK3GDvkdgYrgF76ROJFZwUQjQ9l
+x42gRT05DbXvWFoy7dTglCZ9z/Tt2Cnktv9oxKgmkeHY/CyfpCBg1S8xth2JlGMR
+0ug/GMO5zANuegZOv438p5Lt5So+du2Gl+RMFQqEPwqN5uJSqAe0VtmB4gWdQ8on
+Bj2ZAM2R73QW7UW0Igt2vA4JaSiNtaAG/Y/58VXWHGgbq7rDtNK1R30X0kJV0rGA
+ib3RSwB3LpG7bOjbIucV5mQgJoVjoA1e05w6g1x/KmNTmOGRVwIDAQABo30wezAP
+BgNVHRMBAf8EBTADAQH/MDkGA1UdIAQyMDAwLgYFZ4EDAgEwJTAVBggrBgEFBQcC
+ARYJMS4yLjMuNC41MAwGCCsGAQUFBwICMAAwDgYDVR0PAQH/BAQDAgEGMB0GA1Ud
+DgQWBBRPitp2/2d3I5qmgH1924h1hfeBejANBgkqhkiG9w0BAQUFAAOCAQEACUW1
+QdUHdDJydgDPmYt+telnG/Su+DPaf1cregzlN43bJaJosMP7NwjoJY/H2He4XLWb
+5rXEkl+xH1UyUwF7mtaUoxbGxEvt8hPZSTB4da2mzXgwKvXuHyzF5Qjy1hOB0/pS
+WaF9ARpVKJJ7TOJQdGKBsF2Ty4fSCLqZLgfxbqwMsd9sysXI3rDXjIhekqvbgeLz
+PqZr+pfgFhwCCLSMQWl5Ll3u7Qk9wR094DZ6jj6+JCVCRUS3HyabH4OlM0Vc2K+j
+INsF/64Or7GNtRf9HYEJvrPxHINxl3JVwhYj4ASeaO4KwhVbwtw94Tc/XrGcexDo
+c5lC3rAi4/UZqweYCw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwKgAwIBAgIDAYagMA0GCSqGSIb3DQEBBQUAMIGjMQswCQYDVQQGEwJG
+STEQMA4GA1UECBMHRmlubGFuZDEhMB8GA1UEChMYVmFlc3RvcmVraXN0ZXJpa2Vz
+a3VzIENBMSkwJwYDVQQLEyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBTZXJ2aWNl
+czEZMBcGA1UECxMQVmFybWVubmVwYWx2ZWx1dDEZMBcGA1UEAxMQVlJLIEdvdi4g
+Um9vdCBDQTAeFw0wMjEyMTgxMzUzMDBaFw0yMzEyMTgxMzUxMDhaMIGjMQswCQYD
+VQQGEwJGSTEQMA4GA1UECBMHRmlubGFuZDEhMB8GA1UEChMYVmFlc3RvcmVraXN0
+ZXJpa2Vza3VzIENBMSkwJwYDVQQLEyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBT
+ZXJ2aWNlczEZMBcGA1UECxMQVmFybWVubmVwYWx2ZWx1dDEZMBcGA1UEAxMQVlJL
+IEdvdi4gUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALCF
+FdrIAzfQo0Y3bBseljDCWoUSZyPyu5/nioFgJ/gTqTy894aqqvTzJSm0/nWuHoGG
+igWyHWWyOOi0zCia+xc28ZPVec7Bg4shT8MNrUHfeJ1I4x9CRPw8bSEga60ihCRC
+jxdNwlAfZM0tOSJWiP2yY51U2kJpwMhP1xjiPshphJQ9LIDGfM6911Mf64i5psu7
+hVfvV3ZdDIvTXhJBnyHAOfQmbQj6OLOhd7HuFtjQaNq0mKWgZUZKa41+qk1guPjI
+DfxxPu45h4G02fhukO4/DmHXHSto5i7hQkQmeCxY8n0Wf2HASSQqiYe2XS8pGfim
+545SnkFLWg6quMJmQlMCAwEAAaNVMFMwDwYDVR0TAQH/BAUwAwEB/zARBglghkgB
+hvhCAQEEBAMCAAcwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBTb6eGb0tEkC/yr
+46Bn6q6cS3f0sDANBgkqhkiG9w0BAQUFAAOCAQEArX1ID1QRnljurw2bEi8hpM2b
+uoRH5sklVSPj3xhYKizbXvfNVPVRJHtiZ+GxH0mvNNDrsczZog1Sf0JLiGCXzyVy
+t08pLWKfT6HAVVdWDsRol5EfnGTCKTIB6dTI2riBmCguGMcs/OubUpbf9MiQGS0j
+8/G7cdqehSO9Gu8u5Hp5t8OdhkktY7ktdM9lDzJmid87Ie4pbzlj2RXBbvbfgD5Q
+eBmK3QOjFKU3p7UsfLYRh+cF8ry23tT/l4EohP7+bEaFEEGfTXWMB9SZZ291im/k
+UJL2mdUQuMSpe/cXjUu/15WfCdxEDx4yw8DP03kN5Mc7h/CQNIghYkmSBAQfvA==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+`
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_js.go b/vendor/github.com/google/certificate-transparency-go/x509/root_js.go
new file mode 100644
index 00000000000..4240207a0a0
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_js.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build js && wasm
+// +build js,wasm
+
+package x509
+
+// Possible certificate files; none exist on js/wasm, so the system root pool is always empty.
+var certFiles = []string{}
+
+func loadSystemRoots() (*CertPool, error) {
+ return NewCertPool(), nil
+}
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go b/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go
new file mode 100644
index 00000000000..267775dc5f0
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go
@@ -0,0 +1,15 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+ "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
+ "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL 6
+ "/etc/ssl/ca-bundle.pem", // OpenSUSE
+ "/etc/pki/tls/cacert.pem", // OpenELEC
+ "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
+ "/etc/ssl/cert.pem", // Alpine Linux
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go b/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go
new file mode 100644
index 00000000000..2ee1d5ce800
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo
+// +build !cgo
+
+package x509
+
+func loadSystemRoots() (*CertPool, error) {
+ return execSecurityRoots()
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go b/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go
new file mode 100644
index 00000000000..2bdb2fe7136
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go
@@ -0,0 +1,41 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9
+// +build plan9
+
+package x509
+
+import (
+ "os"
+)
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+ "/sys/lib/tls/ca.pem",
+}
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
+
+func loadSystemRoots() (*CertPool, error) {
+ roots := NewCertPool()
+ var bestErr error
+ for _, file := range certFiles {
+ data, err := os.ReadFile(file)
+ if err == nil {
+ roots.AppendCertsFromPEM(data)
+ return roots, nil
+ }
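+ // Keep the most informative error: prefer a real read failure over a plain "file does not exist".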
+ if bestErr == nil || (os.IsNotExist(bestErr) && !os.IsNotExist(err)) {
+ bestErr = err
+ }
+ }
+ if bestErr == nil {
+ return roots, nil
+ }
+ return nil, bestErr
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go b/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go
new file mode 100644
index 00000000000..e6d4e613994
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+ "/etc/certs/ca-certificates.crt", // Solaris 11.2+
+ "/etc/ssl/certs/ca-certificates.crt", // Joyent SmartOS
+ "/etc/ssl/cacert.pem", // OmniOS
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go b/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go
new file mode 100644
index 00000000000..ac2d01d9798
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go
@@ -0,0 +1,91 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package x509
+
+import (
+ "os"
+)
+
+// Possible directories with certificate files; stop after successfully
+// reading at least one file from a directory.
+var certDirectories = []string{
+ "/etc/ssl/certs", // SLES10/SLES11, https://golang.org/issue/12139
+ "/system/etc/security/cacerts", // Android
+ "/usr/local/share/certs", // FreeBSD
+ "/etc/pki/tls/certs", // Fedora/RHEL
+ "/etc/openssl/certs", // NetBSD
+ "/var/ssl/certs", // AIX
+}
+
+const (
+ // certFileEnv is the environment variable which identifies where to locate
+ // the SSL certificate file. If set, this overrides the system default.
+ certFileEnv = "SSL_CERT_FILE"
+
+ // certDirEnv is the environment variable which identifies which directory
+ // to check for SSL certificate files. If set, this overrides the system default.
+ certDirEnv = "SSL_CERT_DIR"
+)
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
+
+func loadSystemRoots() (*CertPool, error) {
+ roots := NewCertPool()
+
+ files := certFiles
+ if f := os.Getenv(certFileEnv); f != "" {
+ files = []string{f}
+ }
+
+ var firstErr error
+ for _, file := range files {
+ data, err := os.ReadFile(file)
+ if err == nil {
+ roots.AppendCertsFromPEM(data)
+ break
+ }
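+ // A missing candidate file is expected; remember only the first real read failure.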
+ if firstErr == nil && !os.IsNotExist(err) {
+ firstErr = err
+ }
+ }
+
+ dirs := certDirectories
+ if d := os.Getenv(certDirEnv); d != "" {
+ dirs = []string{d}
+ }
+
+ for _, directory := range dirs {
+ fis, err := os.ReadDir(directory)
+ if err != nil {
+ if firstErr == nil && !os.IsNotExist(err) {
+ firstErr = err
+ }
+ continue
+ }
+ rootsAdded := false
+ for _, fi := range fis {
+ data, err := os.ReadFile(directory + "/" + fi.Name())
+ if err == nil && roots.AppendCertsFromPEM(data) {
+ rootsAdded = true
+ }
+ }
+ if rootsAdded {
+ return roots, nil
+ }
+ }
+
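+ // Return whatever roots were found unless nothing was loaded and a genuine failure occurred.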
+ if len(roots.certs) > 0 || firstErr == nil {
+ return roots, nil
+ }
+
+ return nil, firstErr
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_wasip1.go b/vendor/github.com/google/certificate-transparency-go/x509/root_wasip1.go
new file mode 100644
index 00000000000..e5cf98e0af0
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_wasip1.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build wasip1
+// +build wasip1
+
+package x509
+
+// Possible certificate files; none exist on wasip1, so the system root pool is always empty.
+var certFiles = []string{}
+
+func loadSystemRoots() (*CertPool, error) {
+ return NewCertPool(), nil
+}
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go b/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go
new file mode 100644
index 00000000000..39ec95ef3aa
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go
@@ -0,0 +1,289 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "errors"
+ "syscall"
+ "unsafe"
+)
+
+// createStoreContext creates a new *syscall.CertContext representing the leaf
+// certificate in an in-memory certificate store containing itself and all of the
+// intermediate certificates specified in the opts.Intermediates CertPool.
+//
+// A pointer to the in-memory store is available in the returned CertContext's Store field.
+// The store is automatically freed when the CertContext is freed using
+// syscall.CertFreeCertificateContext.
+func createStoreContext(leaf *Certificate, opts *VerifyOptions) (*syscall.CertContext, error) {
+ var storeCtx *syscall.CertContext
+
+ leafCtx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &leaf.Raw[0], uint32(len(leaf.Raw)))
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CertFreeCertificateContext(leafCtx)
+
+ handle, err := syscall.CertOpenStore(syscall.CERT_STORE_PROV_MEMORY, 0, 0, syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CertCloseStore(handle, 0)
+
+ err = syscall.CertAddCertificateContextToStore(handle, leafCtx, syscall.CERT_STORE_ADD_ALWAYS, &storeCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ if opts.Intermediates != nil {
+ for _, intermediate := range opts.Intermediates.certs {
+ ctx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &intermediate.Raw[0], uint32(len(intermediate.Raw)))
+ if err != nil {
+ return nil, err
+ }
+
+ err = syscall.CertAddCertificateContextToStore(handle, ctx, syscall.CERT_STORE_ADD_ALWAYS, nil)
+ syscall.CertFreeCertificateContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return storeCtx, nil
+}
+
+// extractSimpleChain extracts the final certificate chain from a CertSimpleChain.
+func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*Certificate, err error) {
+ if simpleChain == nil || count == 0 {
+ return nil, errors.New("x509: invalid simple chain")
+ }
+
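+	// View the C array as a bounded Go slice: cast the pointer through a
+	// large fixed-size array type, then reslice with [:count:count] so both
+	// the length and capacity equal the element count reported by Windows.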
+ simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:count:count]
+ lastChain := simpleChains[count-1]
+ elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:lastChain.NumElements:lastChain.NumElements]
+ for i := 0; i < int(lastChain.NumElements); i++ {
+ // Copy the buf, since ParseCertificate does not create its own copy.
+ cert := elements[i].CertContext
+ encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length:cert.Length]
+ buf := make([]byte, cert.Length)
+ copy(buf, encodedCert)
+ parsedCert, err := ParseCertificate(buf)
+ if err != nil {
+ return nil, err
+ }
+ chain = append(chain, parsedCert)
+ }
+
+ return chain, nil
+}
+
+// checkChainTrustStatus checks the trust status of the certificate chain, translating
+// any errors it finds into Go errors in the process.
+func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) error {
+ if chainCtx.TrustStatus.ErrorStatus != syscall.CERT_TRUST_NO_ERROR {
+ status := chainCtx.TrustStatus.ErrorStatus
+ switch status {
+ case syscall.CERT_TRUST_IS_NOT_TIME_VALID:
+ return CertificateInvalidError{c, Expired, ""}
+ default:
+ return UnknownAuthorityError{c, nil, nil}
+ }
+ }
+ return nil
+}
+
+// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for
+// use as a certificate chain for an SSL/TLS server.
+func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error {
+ servernamep, err := syscall.UTF16PtrFromString(opts.DNSName)
+ if err != nil {
+ return err
+ }
+ sslPara := &syscall.SSLExtraCertChainPolicyPara{
+ AuthType: syscall.AUTHTYPE_SERVER,
+ ServerName: servernamep,
+ }
+ sslPara.Size = uint32(unsafe.Sizeof(*sslPara))
+
+ para := &syscall.CertChainPolicyPara{
+ ExtraPolicyPara: convertToPolicyParaType(unsafe.Pointer(sslPara)),
+ }
+ para.Size = uint32(unsafe.Sizeof(*para))
+
+ status := syscall.CertChainPolicyStatus{}
+ err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status)
+ if err != nil {
+ return err
+ }
+
+	// TODO(mkrautz): use the lChainIndex and lElementIndex fields
+	// of the CertChainPolicyStatus to provide proper context, instead
+	// of using c.
+ if status.Error != 0 {
+ switch status.Error {
+ case syscall.CERT_E_EXPIRED:
+ return CertificateInvalidError{c, Expired, ""}
+ case syscall.CERT_E_CN_NO_MATCH:
+ return HostnameError{c, opts.DNSName}
+ case syscall.CERT_E_UNTRUSTEDROOT:
+ return UnknownAuthorityError{c, nil, nil}
+ default:
+ return UnknownAuthorityError{c, nil, nil}
+ }
+ }
+
+ return nil
+}
+
+// systemVerify is like Verify, except that it uses CryptoAPI calls
+// to build certificate chains and verify them.
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ hasDNSName := opts != nil && len(opts.DNSName) > 0
+
+ storeCtx, err := createStoreContext(c, opts)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CertFreeCertificateContext(storeCtx)
+
+ para := new(syscall.CertChainPara)
+ para.Size = uint32(unsafe.Sizeof(*para))
+
+ // If there's a DNSName set in opts, assume we're verifying
+ // a certificate from a TLS server.
+ if hasDNSName {
+ oids := []*byte{
+ &syscall.OID_PKIX_KP_SERVER_AUTH[0],
+ // Both IE and Chrome allow certificates with
+ // Server Gated Crypto as well. Some certificates
+ // in the wild require them.
+ &syscall.OID_SERVER_GATED_CRYPTO[0],
+ &syscall.OID_SGC_NETSCAPE[0],
+ }
+ para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR
+ para.RequestedUsage.Usage.Length = uint32(len(oids))
+ para.RequestedUsage.Usage.UsageIdentifiers = &oids[0]
+ } else {
+ para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_AND
+ para.RequestedUsage.Usage.Length = 0
+ para.RequestedUsage.Usage.UsageIdentifiers = nil
+ }
+
+ var verifyTime *syscall.Filetime
+ if opts != nil && !opts.CurrentTime.IsZero() {
+ ft := syscall.NsecToFiletime(opts.CurrentTime.UnixNano())
+ verifyTime = &ft
+ }
+
+ // CertGetCertificateChain will traverse Windows's root stores
+ // in an attempt to build a verified certificate chain. Once
+ // it has found a verified chain, it stops. MSDN docs on
+ // CERT_CHAIN_CONTEXT:
+ //
+ // When a CERT_CHAIN_CONTEXT is built, the first simple chain
+ // begins with an end certificate and ends with a self-signed
+ // certificate. If that self-signed certificate is not a root
+ // or otherwise trusted certificate, an attempt is made to
+ // build a new chain. CTLs are used to create the new chain
+ // beginning with the self-signed certificate from the original
+ // chain as the end certificate of the new chain. This process
+ // continues building additional simple chains until the first
+ // self-signed certificate is a trusted certificate or until
+ // an additional simple chain cannot be built.
+ //
+ // The result is that we'll only get a single trusted chain to
+ // return to our caller.
+ var chainCtx *syscall.CertChainContext
+ err = syscall.CertGetCertificateChain(syscall.Handle(0), storeCtx, verifyTime, storeCtx.Store, para, 0, 0, &chainCtx)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CertFreeCertificateChain(chainCtx)
+
+ err = checkChainTrustStatus(c, chainCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ if hasDNSName {
+ err = checkChainSSLServerPolicy(c, chainCtx, opts)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ chain, err := extractSimpleChain(chainCtx.Chains, int(chainCtx.ChainCount))
+ if err != nil {
+ return nil, err
+ }
+ if len(chain) < 1 {
+ return nil, errors.New("x509: internal error: system verifier returned an empty chain")
+ }
+
+ // Mitigate CVE-2020-0601, where the Windows system verifier might be
+ // tricked into using custom curve parameters for a trusted root, by
+ // double-checking all ECDSA signatures. If the system was tricked into
+ // using spoofed parameters, the signature will be invalid for the correct
+ // ones we parsed. (We don't support custom curves ourselves.)
+ for i, parent := range chain[1:] {
+ if parent.PublicKeyAlgorithm != ECDSA {
+ continue
+ }
+ if err := parent.CheckSignature(chain[i].SignatureAlgorithm,
+ chain[i].RawTBSCertificate, chain[i].Signature); err != nil {
+ return nil, err
+ }
+ }
+
+ return [][]*Certificate{chain}, nil
+}
+
+func loadSystemRoots() (*CertPool, error) {
+ // TODO: restore this functionality on Windows. We tried to do
+ // it in Go 1.8 but had to revert it. See Issue 18609.
+ // Returning (nil, nil) was the old behavior, prior to CL 30578.
+ // The if statement here avoids vet complaining about
+ // unreachable code below.
+ if true {
+ return nil, nil
+ }
+
+ const CRYPT_E_NOT_FOUND = 0x80092004
+
+ store, err := syscall.CertOpenSystemStore(0, syscall.StringToUTF16Ptr("ROOT"))
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CertCloseStore(store, 0)
+
+ roots := NewCertPool()
+ var cert *syscall.CertContext
+ for {
+ cert, err = syscall.CertEnumCertificatesInStore(store, cert)
+ if err != nil {
+ if errno, ok := err.(syscall.Errno); ok {
+ if errno == CRYPT_E_NOT_FOUND {
+ break
+ }
+ }
+ return nil, err
+ }
+ if cert == nil {
+ break
+ }
+ // Copy the buf, since ParseCertificate does not create its own copy.
+ buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length:cert.Length]
+ buf2 := make([]byte, cert.Length)
+ copy(buf2, buf)
+ if c, err := ParseCertificate(buf2); err == nil {
+ roots.AddCert(c)
+ }
+ }
+ return roots, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/root_zos.go b/vendor/github.com/google/certificate-transparency-go/x509/root_zos.go
new file mode 100644
index 00000000000..54f240af07d
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/root_zos.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos
+// +build zos
+
+package x509
+
+// Possible certificate files; stop after finding one.
+var certFiles = []string{
+ "/etc/cacert.pem", // IBM zOS default
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/rpki.go b/vendor/github.com/google/certificate-transparency-go/x509/rpki.go
new file mode 100644
index 00000000000..520d6dc3abd
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/rpki.go
@@ -0,0 +1,242 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/google/certificate-transparency-go/asn1"
+)
+
+// IPAddressPrefix describes an IP address prefix as an ASN.1 bit string,
+// where the BitLength field holds the prefix length.
+type IPAddressPrefix asn1.BitString
+
+// IPAddressRange describes an (inclusive) IP address range.
+type IPAddressRange struct {
+ Min IPAddressPrefix
+ Max IPAddressPrefix
+}
+
+// Most relevant values for AFI from:
+// http://www.iana.org/assignments/address-family-numbers.
+const (
+ IPv4AddressFamilyIndicator = uint16(1)
+ IPv6AddressFamilyIndicator = uint16(2)
+)
+
+// IPAddressFamilyBlocks describes a set of ranges of IP addresses.
+type IPAddressFamilyBlocks struct {
+ // AFI holds an address family indicator from
+ // http://www.iana.org/assignments/address-family-numbers.
+ AFI uint16
+ // SAFI holds a subsequent address family indicator from
+ // http://www.iana.org/assignments/safi-namespace.
+ SAFI byte
+ // InheritFromIssuer indicates that the set of addresses should
+ // be taken from the issuer's certificate.
+ InheritFromIssuer bool
+ // AddressPrefixes holds prefixes if InheritFromIssuer is false.
+ AddressPrefixes []IPAddressPrefix
+ // AddressRanges holds ranges if InheritFromIssuer is false.
+ AddressRanges []IPAddressRange
+}
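+
+// A hedged sketch of how a caller might turn a parsed IPAddressPrefix into a
+// net.IPNet; the toIPNet helper is illustrative only and not part of this
+// package:
+//
+//	func toIPNet(p IPAddressPrefix, afi uint16) *net.IPNet {
+//		size := net.IPv4len // AFI 1; use net.IPv6len for AFI 2
+//		if afi == IPv6AddressFamilyIndicator {
+//			size = net.IPv6len
+//		}
+//		ip := make(net.IP, size) // prefix bytes, zero-padded to full address width
+//		copy(ip, p.Bytes)
+//		return &net.IPNet{IP: ip, Mask: net.CIDRMask(p.BitLength, size*8)}
+//	}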
+
+// Internal types for asn1 unmarshalling.
+type ipAddressFamily struct {
+ AddressFamily []byte // 2-byte AFI plus optional 1 byte SAFI
+ Choice asn1.RawValue
+}
+
+// Internally, use the raw asn1.BitString rather than the defined type
+// IPAddressPrefix (so that asn1.Unmarshal() decodes properly).
+type ipAddressRange struct {
+ Min asn1.BitString
+ Max asn1.BitString
+}
+
+func parseRPKIAddrBlocks(data []byte, nfe *NonFatalErrors) []*IPAddressFamilyBlocks {
+ // RFC 3779 2.2.3
+ // IPAddrBlocks ::= SEQUENCE OF IPAddressFamily
+ //
+ // IPAddressFamily ::= SEQUENCE { -- AFI & optional SAFI --
+ // addressFamily OCTET STRING (SIZE (2..3)),
+ // ipAddressChoice IPAddressChoice }
+ //
+ // IPAddressChoice ::= CHOICE {
+ // inherit NULL, -- inherit from issuer --
+ // addressesOrRanges SEQUENCE OF IPAddressOrRange }
+ //
+ // IPAddressOrRange ::= CHOICE {
+ // addressPrefix IPAddress,
+ // addressRange IPAddressRange }
+ //
+ // IPAddressRange ::= SEQUENCE {
+ // min IPAddress,
+ // max IPAddress }
+ //
+ // IPAddress ::= BIT STRING
+
+ var addrBlocks []ipAddressFamily
+ if rest, err := asn1.Unmarshal(data, &addrBlocks); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks extension: %v", err))
+ return nil
+ } else if len(rest) != 0 {
+ nfe.AddError(errors.New("trailing data after ipAddrBlocks extension"))
+ return nil
+ }
+
+ var results []*IPAddressFamilyBlocks
+ for i, block := range addrBlocks {
+ var fam IPAddressFamilyBlocks
+ if l := len(block.AddressFamily); l < 2 || l > 3 {
+ nfe.AddError(fmt.Errorf("invalid address family length (%d) for ipAddrBlock.addressFamily", l))
+ continue
+ }
+ fam.AFI = binary.BigEndian.Uint16(block.AddressFamily[0:2])
+ if len(block.AddressFamily) > 2 {
+ fam.SAFI = block.AddressFamily[2]
+ }
+ // IPAddressChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
+ // tagging of the alternatives -- here, either NULL or SEQUENCE OF.
+ if bytes.Equal(block.Choice.FullBytes, asn1.NullBytes) {
+ fam.InheritFromIssuer = true
+ results = append(results, &fam)
+ continue
+ }
+
+ var addrRanges []asn1.RawValue
+ if _, err := asn1.Unmarshal(block.Choice.FullBytes, &addrRanges); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges: %v", i, err))
+ continue
+ }
+ for j, ar := range addrRanges {
+ // Each IPAddressOrRange is a CHOICE where the alternatives have distinct (implicit)
+ // tags -- here, either BIT STRING or SEQUENCE.
+ switch ar.Tag {
+ case asn1.TagBitString:
+ // BIT STRING for single prefix IPAddress
+ var val asn1.BitString
+ if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressPrefix: %v", i, j, err))
+ continue
+ }
+ fam.AddressPrefixes = append(fam.AddressPrefixes, IPAddressPrefix(val))
+
+ case asn1.TagSequence:
+ var val ipAddressRange
+ if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressRange: %v", i, j, err))
+ continue
+ }
+ fam.AddressRanges = append(fam.AddressRanges, IPAddressRange{Min: IPAddressPrefix(val.Min), Max: IPAddressPrefix(val.Max)})
+
+ default:
+ nfe.AddError(fmt.Errorf("unexpected ASN.1 type in ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d]: %+v", i, j, ar))
+ }
+ }
+ results = append(results, &fam)
+ }
+ return results
+}
+
+// ASIDRange describes an inclusive range of AS Identifiers (AS numbers or routing
+// domain identifiers).
+type ASIDRange struct {
+ Min int
+ Max int
+}
+
+// ASIdentifiers describes a collection of AS Identifiers (AS numbers or routing
+// domain identifiers).
+type ASIdentifiers struct {
+ // InheritFromIssuer indicates that the set of AS identifiers should
+ // be taken from the issuer's certificate.
+ InheritFromIssuer bool
+ // ASIDs holds AS identifiers if InheritFromIssuer is false.
+ ASIDs []int
+	// ASIDRanges holds AS identifier ranges (inclusive) if InheritFromIssuer is false.
+ ASIDRanges []ASIDRange
+}
+
+type asIdentifiers struct {
+ ASNum asn1.RawValue `asn1:"optional,tag:0"`
+ RDI asn1.RawValue `asn1:"optional,tag:1"`
+}
+
+func parseASIDChoice(val asn1.RawValue, nfe *NonFatalErrors) *ASIdentifiers {
+ // RFC 3779 2.3.2
+ // ASIdentifierChoice ::= CHOICE {
+ // inherit NULL, -- inherit from issuer --
+ // asIdsOrRanges SEQUENCE OF ASIdOrRange }
+ // ASIdOrRange ::= CHOICE {
+ // id ASId,
+ // range ASRange }
+ // ASRange ::= SEQUENCE {
+ // min ASId,
+ // max ASId }
+ // ASId ::= INTEGER
+ if len(val.FullBytes) == 0 { // OPTIONAL
+ return nil
+ }
+ // ASIdentifierChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
+ // tagging of the alternatives -- here, either NULL or SEQUENCE OF.
+ if bytes.Equal(val.Bytes, asn1.NullBytes) {
+ return &ASIdentifiers{InheritFromIssuer: true}
+ }
+ var ids []asn1.RawValue
+ if rest, err := asn1.Unmarshal(val.Bytes, &ids); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges: %v", err))
+ return nil
+ } else if len(rest) != 0 {
+ nfe.AddError(errors.New("trailing data after ASIdentifiers.asIdsOrRanges"))
+ return nil
+ }
+ var asID ASIdentifiers
+ for i, id := range ids {
+ // Each ASIdOrRange is a CHOICE where the alternatives have distinct (implicit)
+ // tags -- here, either INTEGER or SEQUENCE.
+ switch id.Tag {
+ case asn1.TagInteger:
+ var val int
+ if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].id: %v", i, err))
+ continue
+ }
+ asID.ASIDs = append(asID.ASIDs, val)
+
+ case asn1.TagSequence:
+ var val ASIDRange
+ if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].range: %v", i, err))
+ continue
+ }
+ asID.ASIDRanges = append(asID.ASIDRanges, val)
+
+ default:
+ nfe.AddError(fmt.Errorf("unexpected value in ASIdentifiers.asIdsOrRanges[%d]: %+v", i, id))
+ }
+ }
+ return &asID
+}
+
+func parseRPKIASIdentifiers(data []byte, nfe *NonFatalErrors) (*ASIdentifiers, *ASIdentifiers) {
+ // RFC 3779 2.3.2
+ // ASIdentifiers ::= SEQUENCE {
+ // asnum [0] EXPLICIT ASIdentifierChoice OPTIONAL,
+ // rdi [1] EXPLICIT ASIdentifierChoice OPTIONAL}
+ var asIDs asIdentifiers
+ if rest, err := asn1.Unmarshal(data, &asIDs); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers extension: %v", err))
+ return nil, nil
+ } else if len(rest) != 0 {
+ nfe.AddError(errors.New("trailing data after ASIdentifiers extension"))
+ return nil, nil
+ }
+ return parseASIDChoice(asIDs.ASNum, nfe), parseASIDChoice(asIDs.RDI, nfe)
+}
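+
+// A hedged usage sketch for the RFC 3779 parsers above; ext is assumed to be
+// the raw value of an AS identifiers extension, and the Errors field on
+// NonFatalErrors is assumed from this package's error-accumulation style:
+//
+//	var nfe NonFatalErrors
+//	asNum, rdi := parseRPKIASIdentifiers(ext, &nfe)
+//	if len(nfe.Errors) > 0 {
+//		// degraded parse: asNum/rdi hold whatever was recoverable
+//	}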
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/sec1.go b/vendor/github.com/google/certificate-transparency-go/x509/sec1.go
new file mode 100644
index 00000000000..d19407079f3
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/sec1.go
@@ -0,0 +1,127 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "errors"
+ "fmt"
+ "math/big"
+
+ "github.com/google/certificate-transparency-go/asn1"
+)
+
+const ecPrivKeyVersion = 1
+
+// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
+// References:
+//
+// RFC 5915
+// SEC1 - http://www.secg.org/sec1-v2.pdf
+//
+// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, but in
+// practice it is almost always present.
+type ecPrivateKey struct {
+ Version int
+ PrivateKey []byte
+ NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
+ PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"`
+}
+
+// ParseECPrivateKey parses an EC private key in SEC 1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY".
+func ParseECPrivateKey(der []byte) (*ecdsa.PrivateKey, error) {
+ return parseECPrivateKey(nil, der)
+}
+
+// MarshalECPrivateKey converts an EC private key to SEC 1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY".
+// For a more flexible key format which is not EC specific, use
+// MarshalPKCS8PrivateKey.
+func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
+ oid, ok := OIDFromNamedCurve(key.Curve)
+ if !ok {
+ return nil, errors.New("x509: unknown elliptic curve")
+ }
+
+ return marshalECPrivateKeyWithOID(key, oid)
+}
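+
+// A hedged round-trip sketch pairing the marshal/parse functions above with
+// the standard library (crypto/ecdsa, crypto/elliptic, crypto/rand, and
+// encoding/pem are assumed on the caller's side; errors elided for brevity):
+//
+//	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	der, _ := MarshalECPrivateKey(key)
+//	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})
+//	block, _ := pem.Decode(pemBytes)
+//	key2, _ := ParseECPrivateKey(block.Bytes) // key2 matches key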
+
+// marshalECPrivateKeyWithOID marshals an EC private key into ASN.1, DER format
+// and sets the curve ID to the given OID, or omits it if OID is nil.
+func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) {
+ privateKeyBytes := key.D.Bytes()
+ paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
+ copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes)
+
+ return asn1.Marshal(ecPrivateKey{
+ Version: 1,
+ PrivateKey: paddedPrivateKey,
+ NamedCurveOID: oid,
+ PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
+ })
+}
+
+// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
+// The OID for the named curve may be provided from another source (such as
+// the PKCS8 container); if provided, it is used instead of any OID that may
+// exist in the EC private key structure.
+func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
+ var privKey ecPrivateKey
+ if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+ if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)")
+ }
+ if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)")
+ }
+ return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
+ }
+ if privKey.Version != ecPrivKeyVersion {
+ return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
+ }
+
+ var nfe NonFatalErrors
+ var curve elliptic.Curve
+ if namedCurveOID != nil {
+ curve = namedCurveFromOID(*namedCurveOID, &nfe)
+ } else {
+ curve = namedCurveFromOID(privKey.NamedCurveOID, &nfe)
+ }
+ if curve == nil {
+ return nil, errors.New("x509: unknown elliptic curve")
+ }
+
+ k := new(big.Int).SetBytes(privKey.PrivateKey)
+ curveOrder := curve.Params().N
+ if k.Cmp(curveOrder) >= 0 {
+ return nil, errors.New("x509: invalid elliptic curve private key value")
+ }
+ priv := new(ecdsa.PrivateKey)
+ priv.Curve = curve
+ priv.D = k
+
+ privateKey := make([]byte, (curveOrder.BitLen()+7)/8)
+
+ // Some private keys have leading zero padding. This is invalid
+ // according to [SEC1], but this code will ignore it.
+ for len(privKey.PrivateKey) > len(privateKey) {
+ if privKey.PrivateKey[0] != 0 {
+ return nil, errors.New("x509: invalid private key length")
+ }
+ privKey.PrivateKey = privKey.PrivateKey[1:]
+ }
+
+	// Some private keys remove all leading zeros; this is also invalid
+	// according to [SEC1] but, since OpenSSL used to do this, we ignore
+	// it too.
+ copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey)
+ priv.X, priv.Y = curve.ScalarBaseMult(privateKey)
+
+ return priv, nil
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt b/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt
new file mode 100644
index 00000000000..b7fc9c51861
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIJAL8a/lsnspOqMA0GCSqGSIb3DQEBCwUAMEwxCzAJBgNV
+BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz
+dHMxETAPBgNVBAMMCHRlc3QtZGlyMB4XDTE3MDIwMTIzNTAyN1oXDTI3MDEzMDIz
+NTAyN1owTDELMAkGA1UEBhMCVUsxEzARBgNVBAgMClRlc3QtU3RhdGUxFTATBgNV
+BAoMDEdvbGFuZyBUZXN0czERMA8GA1UEAwwIdGVzdC1kaXIwggIiMA0GCSqGSIb3
+DQEBAQUAA4ICDwAwggIKAoICAQDzBoi43Yn30KN13PKFHu8LA4UmgCRToTukLItM
+WK2Je45grs/axg9n3YJOXC6hmsyrkOnyBcx1xVNgSrOAll7fSjtChRIX72Xrloxu
+XewtWVIrijqz6oylbvEmbRT3O8uynu5rF82Pmdiy8oiSfdywjKuPnE0hjV1ZSCql
+MYcXqA+f0JFD8kMv4pbtxjGH8f2DkYQz+hHXLrJH4/MEYdVMQXoz/GDzLyOkrXBN
+hpMaBBqg1p0P+tRdfLXuliNzA9vbZylzpF1YZ0gvsr0S5Y6LVtv7QIRygRuLY4kF
+k+UYuFq8NrV8TykS7FVnO3tf4XcYZ7r2KV5FjYSrJtNNo85BV5c3xMD3fJ2XcOWk
++oD1ATdgAM3aKmSOxNtNItKKxBe1mkqDH41NbWx7xMad78gDznyeT0tjEOltN2bM
+uXU1R/jgR/vq5Ec0AhXJyL/ziIcmuV2fSl/ZxT4ARD+16tgPiIx+welTf0v27/JY
+adlfkkL5XsPRrbSguISrj7JeaO/gjG3KnDVHcZvYBpDfHqRhCgrosfe26TZcTXx2
+cRxOfvBjMz1zJAg+esuUzSkerreyRhzD7RpeZTwi6sxvx82MhYMbA3w1LtgdABio
+9JRqZy3xqsIbNv7N46WO/qXL1UMRKb1UyHeW8g8btboz+B4zv1U0Nj+9qxPBbQui
+dgL9LQIDAQABo1AwTjAdBgNVHQ4EFgQUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwHwYD
+VR0jBBgwFoAUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwDAYDVR0TBAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAgEAvEVnUYsIOt87rggmLPqEueynkuQ+562M8EDHSQl82zbe
+xDCxeg3DvPgKb+RvaUdt1362z/szK10SoeMgx6+EQLoV9LiVqXwNqeYfixrhrdw3
+ppAhYYhymdkbUQCEMHypmXP1vPhAz4o8Bs+eES1M+zO6ErBiD7SqkmBElT+GixJC
+6epC9ZQFs+dw3lPlbiZSsGE85sqc3VAs0/JgpL/pb1/Eg4s0FUhZD2C2uWdSyZGc
+g0/v3aXJCp4j/9VoNhI1WXz3M45nysZIL5OQgXymLqJElQa1pZ3Wa4i/nidvT4AT
+Xlxc/qijM8set/nOqp7hVd5J0uG6qdwLRILUddZ6OpXd7ZNi1EXg+Bpc7ehzGsDt
+3UFGzYXDjxYnK2frQfjLS8stOQIqSrGthW6x0fdkVx0y8BByvd5J6+JmZl4UZfzA
+m99VxXSt4B9x6BvnY7ktzcFDOjtuLc4B/7yg9fv1eQuStA4cHGGAttsCg1X/Kx8W
+PvkkeH0UWDZ9vhH9K36703z89da6MWF+bz92B0+4HoOmlVaXRkvblsNaynJnL0LC
+Ayry7QBxuh5cMnDdRwJB3AVJIiJ1GVpb7aGvBOnx+s2lwRv9HWtghb+cbwwktx1M
+JHyBf3GZNSWTpKY7cD8V+NnBv3UuioOVVo+XAU4LF/bYUjdRpxWADJizNtZrtFo=
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt b/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt
new file mode 100644
index 00000000000..caa83b9f824
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt
@@ -0,0 +1,32 @@
+-----BEGIN CERTIFICATE-----
+MIIFbTCCA1WgAwIBAgIJAN338vEmMtLsMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNV
+BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz
+dHMxEjAQBgNVBAMMCXRlc3QtZmlsZTAeFw0xNzAyMDEyMzUyMDhaFw0yNzAxMzAy
+MzUyMDhaME0xCzAJBgNVBAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYD
+VQQKDAxHb2xhbmcgVGVzdHMxEjAQBgNVBAMMCXRlc3QtZmlsZTCCAiIwDQYJKoZI
+hvcNAQEBBQADggIPADCCAgoCggIBAPMGiLjdiffQo3Xc8oUe7wsDhSaAJFOhO6Qs
+i0xYrYl7jmCuz9rGD2fdgk5cLqGazKuQ6fIFzHXFU2BKs4CWXt9KO0KFEhfvZeuW
+jG5d7C1ZUiuKOrPqjKVu8SZtFPc7y7Ke7msXzY+Z2LLyiJJ93LCMq4+cTSGNXVlI
+KqUxhxeoD5/QkUPyQy/ilu3GMYfx/YORhDP6Edcuskfj8wRh1UxBejP8YPMvI6St
+cE2GkxoEGqDWnQ/61F18te6WI3MD29tnKXOkXVhnSC+yvRLljotW2/tAhHKBG4tj
+iQWT5Ri4Wrw2tXxPKRLsVWc7e1/hdxhnuvYpXkWNhKsm002jzkFXlzfEwPd8nZdw
+5aT6gPUBN2AAzdoqZI7E200i0orEF7WaSoMfjU1tbHvExp3vyAPOfJ5PS2MQ6W03
+Zsy5dTVH+OBH++rkRzQCFcnIv/OIhya5XZ9KX9nFPgBEP7Xq2A+IjH7B6VN/S/bv
+8lhp2V+SQvlew9GttKC4hKuPsl5o7+CMbcqcNUdxm9gGkN8epGEKCuix97bpNlxN
+fHZxHE5+8GMzPXMkCD56y5TNKR6ut7JGHMPtGl5lPCLqzG/HzYyFgxsDfDUu2B0A
+GKj0lGpnLfGqwhs2/s3jpY7+pcvVQxEpvVTId5byDxu1ujP4HjO/VTQ2P72rE8Ft
+C6J2Av0tAgMBAAGjUDBOMB0GA1UdDgQWBBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAf
+BgNVHSMEGDAWgBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAMBgNVHRMEBTADAQH/MA0G
+CSqGSIb3DQEBCwUAA4ICAQB3sCntCcQwhMgRPPyvOCMyTcQ/Iv+cpfxz2Ck14nlx
+AkEAH2CH0ov5GWTt07/ur3aa5x+SAKi0J3wTD1cdiw4U/6Uin6jWGKKxvoo4IaeK
+SbM8w/6eKx6UbmHx7PA/eRABY9tTlpdPCVgw7/o3WDr03QM+IAtatzvaCPPczake
+pbdLwmBZB/v8V+6jUajy6jOgdSH0PyffGnt7MWgDETmNC6p/Xigp5eh+C8Fb4NGT
+xgHES5PBC+sruWp4u22bJGDKTvYNdZHsnw/CaKQWNsQqwisxa3/8N5v+PCff/pxl
+r05pE3PdHn9JrCl4iWdVlgtiI9BoPtQyDfa/OEFaScE8KYR8LxaAgdgp3zYncWls
+BpwQ6Y/A2wIkhlD9eEp5Ib2hz7isXOs9UwjdriKqrBXqcIAE5M+YIk3+KAQKxAtd
+4YsK3CSJ010uphr12YKqlScj4vuKFjuOtd5RyyMIxUG3lrrhAu2AzCeKCLdVgA8+
+75FrYMApUdvcjp4uzbBoED4XRQlx9kdFHVbYgmE/+yddBYJM8u4YlgAL0hW2/D8p
+z9JWIfxVmjJnBnXaKGBuiUyZ864A3PJndP6EMMo7TzS2CDnfCYuJjvI0KvDjFNmc
+rQA04+qfMSEz3nmKhbbZu4eYLzlADhfH8tT4GMtXf71WLA5AUHGf2Y4+HIHTsmHG
+vQ==
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/verify.go b/vendor/github.com/google/certificate-transparency-go/x509/verify.go
new file mode 100644
index 00000000000..07118c2bf65
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/verify.go
@@ -0,0 +1,1109 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// ignoreCN disables interpreting Common Name as a hostname. See issue 24151.
+var ignoreCN = strings.Contains(os.Getenv("GODEBUG"), "x509ignoreCN=1")
+
+type InvalidReason int
+
+const (
+ // NotAuthorizedToSign results when a certificate is signed by another
+ // which isn't marked as a CA certificate.
+ NotAuthorizedToSign InvalidReason = iota
+ // Expired results when a certificate has expired, based on the time
+ // given in the VerifyOptions.
+ Expired
+ // CANotAuthorizedForThisName results when an intermediate or root
+ // certificate has a name constraint which doesn't permit a DNS or
+ // other name (including IP address) in the leaf certificate.
+ CANotAuthorizedForThisName
+ // TooManyIntermediates results when a path length constraint is
+ // violated.
+ TooManyIntermediates
+ // IncompatibleUsage results when the certificate's key usage indicates
+ // that it may only be used for a different purpose.
+ IncompatibleUsage
+ // NameMismatch results when the subject name of a parent certificate
+ // does not match the issuer name in the child.
+ NameMismatch
+ // NameConstraintsWithoutSANs results when a leaf certificate doesn't
+ // contain a Subject Alternative Name extension, but a CA certificate
+ // contains name constraints, and the Common Name can be interpreted as
+ // a hostname.
+ //
+ // You can avoid this error by setting the experimental GODEBUG environment
+ // variable to "x509ignoreCN=1", disabling Common Name matching entirely.
+ // This behavior might become the default in the future.
+ NameConstraintsWithoutSANs
+	// UnconstrainedName results when a CA certificate contains permitted
+	// name constraints, but the leaf certificate contains a name of an
+	// unsupported or unconstrained type.
+	UnconstrainedName
+	// TooManyConstraints results when the number of comparison operations
+	// needed to check a certificate exceeds the limit set by
+	// VerifyOptions.MaxConstraintComparisions. This limit exists to
+	// prevent pathological certificates from consuming excessive amounts of
+	// CPU time to verify.
+	TooManyConstraints
+ // CANotAuthorizedForExtKeyUsage results when an intermediate or root
+ // certificate does not permit a requested extended key usage.
+ CANotAuthorizedForExtKeyUsage
+)
+
+// CertificateInvalidError results when an odd error occurs. Users of this
+// library probably want to handle all these errors uniformly.
+type CertificateInvalidError struct {
+ Cert *Certificate
+ Reason InvalidReason
+ Detail string
+}
+
+func (e CertificateInvalidError) Error() string {
+ switch e.Reason {
+ case NotAuthorizedToSign:
+ return "x509: certificate is not authorized to sign other certificates"
+ case Expired:
+ return "x509: certificate has expired or is not yet valid: " + e.Detail
+ case CANotAuthorizedForThisName:
+ return "x509: a root or intermediate certificate is not authorized to sign for this name: " + e.Detail
+ case CANotAuthorizedForExtKeyUsage:
+ return "x509: a root or intermediate certificate is not authorized for an extended key usage: " + e.Detail
+ case TooManyIntermediates:
+ return "x509: too many intermediates for path length constraint"
+ case IncompatibleUsage:
+ return "x509: certificate specifies an incompatible key usage"
+ case NameMismatch:
+ return "x509: issuer name does not match subject from issuing certificate"
+ case NameConstraintsWithoutSANs:
+ return "x509: issuer has name constraints but leaf doesn't have a SAN extension"
+ case UnconstrainedName:
+ return "x509: issuer has name constraints but leaf contains unknown or unconstrained name: " + e.Detail
+ }
+ return "x509: unknown error"
+}
+
+// HostnameError results when the set of authorized names doesn't match the
+// requested name.
+type HostnameError struct {
+ Certificate *Certificate
+ Host string
+}
+
+func (h HostnameError) Error() string {
+ c := h.Certificate
+
+ if !c.hasSANExtension() && !validHostname(c.Subject.CommonName) &&
+ matchHostnames(toLowerCaseASCII(c.Subject.CommonName), toLowerCaseASCII(h.Host)) {
+ // This would have validated, if it weren't for the validHostname check on Common Name.
+ return "x509: Common Name is not a valid hostname: " + c.Subject.CommonName
+ }
+
+ var valid string
+ if ip := net.ParseIP(h.Host); ip != nil {
+ // Trying to validate an IP
+ if len(c.IPAddresses) == 0 {
+ return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs"
+ }
+ for _, san := range c.IPAddresses {
+ if len(valid) > 0 {
+ valid += ", "
+ }
+ valid += san.String()
+ }
+ } else {
+ if c.commonNameAsHostname() {
+ valid = c.Subject.CommonName
+ } else {
+ valid = strings.Join(c.DNSNames, ", ")
+ }
+ }
+
+ if len(valid) == 0 {
+ return "x509: certificate is not valid for any names, but wanted to match " + h.Host
+ }
+ return "x509: certificate is valid for " + valid + ", not " + h.Host
+}
+
+// UnknownAuthorityError results when the certificate issuer is unknown.
+type UnknownAuthorityError struct {
+ Cert *Certificate
+ // hintErr contains an error that may be helpful in determining why an
+ // authority wasn't found.
+ hintErr error
+ // hintCert contains a possible authority certificate that was rejected
+ // because of the error in hintErr.
+ hintCert *Certificate
+}
+
+func (e UnknownAuthorityError) Error() string {
+ s := "x509: certificate signed by unknown authority"
+ if e.hintErr != nil {
+ certName := e.hintCert.Subject.CommonName
+ if len(certName) == 0 {
+ if len(e.hintCert.Subject.Organization) > 0 {
+ certName = e.hintCert.Subject.Organization[0]
+ } else {
+ certName = "serial:" + e.hintCert.SerialNumber.String()
+ }
+ }
+ s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName)
+ }
+ return s
+}
+
+// SystemRootsError results when we fail to load the system root certificates.
+type SystemRootsError struct {
+ Err error
+}
+
+func (se SystemRootsError) Error() string {
+ msg := "x509: failed to load system roots and no roots provided"
+ if se.Err != nil {
+ return msg + "; " + se.Err.Error()
+ }
+ return msg
+}
+
+// errNotParsed is returned when a certificate without ASN.1 contents is
+// verified. Platform-specific verification needs the ASN.1 contents.
+var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificate")
+
+// VerifyOptions contains parameters for Certificate.Verify. It's a structure
+// because other PKIX verification APIs have ended up needing many options.
+type VerifyOptions struct {
+ DNSName string
+ Intermediates *CertPool
+ Roots *CertPool // if nil, the system roots are used
+ CurrentTime time.Time // if zero, the current time is used
+ // Options to disable various verification checks.
+ DisableTimeChecks bool
+ DisableCriticalExtensionChecks bool
+ DisableNameChecks bool
+ DisableEKUChecks bool
+ DisablePathLenChecks bool
+ DisableNameConstraintChecks bool
+ // KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
+ // certificate is accepted if it contains any of the listed values. An empty
+ // list means ExtKeyUsageServerAuth. To accept any key usage, include
+ // ExtKeyUsageAny.
+ //
+ // Certificate chains are required to nest these extended key usage values.
+ // (This matches the Windows CryptoAPI behavior, but not the spec.)
+ KeyUsages []ExtKeyUsage
+ // MaxConstraintComparisions is the maximum number of comparisons to
+ // perform when checking a given certificate's name constraints. If
+ // zero, a sensible default is used. This limit prevents pathological
+ // certificates from consuming excessive amounts of CPU time when
+ // validating.
+ MaxConstraintComparisions int
+}
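+
+// A hedged usage sketch: verifying a parsed leaf against explicit roots and
+// intermediates (rootCert, interCert and leaf are assumed to be parsed
+// *Certificate values):
+//
+//	roots := NewCertPool()
+//	roots.AddCert(rootCert)
+//	inters := NewCertPool()
+//	inters.AddCert(interCert)
+//	chains, err := leaf.Verify(VerifyOptions{
+//		DNSName:       "example.com",
+//		Roots:         roots,
+//		Intermediates: inters,
+//	})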
+
+const (
+ leafCertificate = iota
+ intermediateCertificate
+ rootCertificate
+)
+
+// rfc2821Mailbox represents a “mailbox” (which is an email address to most
+// people) by breaking it into the “local” (i.e. before the '@') and “domain”
+// parts.
+type rfc2821Mailbox struct {
+ local, domain string
+}
+
+// parseRFC2821Mailbox parses an email address into local and domain parts,
+// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280,
+// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The
+// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”.
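+//
+// A hedged illustration (quoted local parts may contain spaces, which this
+// parser deliberately accepts):
+//
+//	parseRFC2821Mailbox(`"john doe"@example.com`)
+//	// → rfc2821Mailbox{local: "john doe", domain: "example.com"}, ok == true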
+func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
+ if len(in) == 0 {
+ return mailbox, false
+ }
+
+ localPartBytes := make([]byte, 0, len(in)/2)
+
+ if in[0] == '"' {
+ // Quoted-string = DQUOTE *qcontent DQUOTE
+ // non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127
+ // qcontent = qtext / quoted-pair
+ // qtext = non-whitespace-control /
+ // %d33 / %d35-91 / %d93-126
+ // quoted-pair = ("\" text) / obs-qp
+ // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text
+ //
+ // (Names beginning with “obs-” are the obsolete syntax from RFC 2822,
+		// Section 4. That syntax is long obsolete, so we no longer accept it.)
+ in = in[1:]
+ QuotedString:
+ for {
+ if len(in) == 0 {
+ return mailbox, false
+ }
+ c := in[0]
+ in = in[1:]
+
+ switch {
+ case c == '"':
+ break QuotedString
+
+ case c == '\\':
+ // quoted-pair
+ if len(in) == 0 {
+ return mailbox, false
+ }
+ if in[0] == 11 ||
+ in[0] == 12 ||
+ (1 <= in[0] && in[0] <= 9) ||
+ (14 <= in[0] && in[0] <= 127) {
+ localPartBytes = append(localPartBytes, in[0])
+ in = in[1:]
+ } else {
+ return mailbox, false
+ }
+
+ case c == 11 ||
+ c == 12 ||
+ // Space (char 32) is not allowed based on the
+ // BNF, but RFC 3696 gives an example that
+ // assumes that it is. Several “verified”
+ // errata continue to argue about this point.
+ // We choose to accept it.
+ c == 32 ||
+ c == 33 ||
+ c == 127 ||
+ (1 <= c && c <= 8) ||
+ (14 <= c && c <= 31) ||
+ (35 <= c && c <= 91) ||
+ (93 <= c && c <= 126):
+ // qtext
+ localPartBytes = append(localPartBytes, c)
+
+ default:
+ return mailbox, false
+ }
+ }
+ } else {
+ // Atom ("." Atom)*
+ NextChar:
+ for len(in) > 0 {
+ // atext from RFC 2822, Section 3.2.4
+ c := in[0]
+
+ switch {
+ case c == '\\':
+ // Examples given in RFC 3696 suggest that
+ // escaped characters can appear outside of a
+ // quoted string. Several “verified” errata
+ // continue to argue the point. We choose to
+ // accept it.
+ in = in[1:]
+ if len(in) == 0 {
+ return mailbox, false
+ }
+ fallthrough
+
+ case ('0' <= c && c <= '9') ||
+ ('a' <= c && c <= 'z') ||
+ ('A' <= c && c <= 'Z') ||
+ c == '!' || c == '#' || c == '$' || c == '%' ||
+ c == '&' || c == '\'' || c == '*' || c == '+' ||
+ c == '-' || c == '/' || c == '=' || c == '?' ||
+ c == '^' || c == '_' || c == '`' || c == '{' ||
+ c == '|' || c == '}' || c == '~' || c == '.':
+ localPartBytes = append(localPartBytes, in[0])
+ in = in[1:]
+
+ default:
+ break NextChar
+ }
+ }
+
+ if len(localPartBytes) == 0 {
+ return mailbox, false
+ }
+
+ // From RFC 3696, Section 3:
+ // “period (".") may also appear, but may not be used to start
+ // or end the local part, nor may two or more consecutive
+ // periods appear.”
+ twoDots := []byte{'.', '.'}
+ if localPartBytes[0] == '.' ||
+ localPartBytes[len(localPartBytes)-1] == '.' ||
+ bytes.Contains(localPartBytes, twoDots) {
+ return mailbox, false
+ }
+ }
+
+ if len(in) == 0 || in[0] != '@' {
+ return mailbox, false
+ }
+ in = in[1:]
+
+	// The RFC specifies a format for domains, but that's known to be
+ // violated in practice so we accept that anything after an '@' is the
+ // domain part.
+ if _, ok := domainToReverseLabels(in); !ok {
+ return mailbox, false
+ }
+
+ mailbox.local = string(localPartBytes)
+ mailbox.domain = in
+ return mailbox, true
+}
+
+// domainToReverseLabels converts a textual domain name like foo.example.com to
+// the list of labels in reverse order, e.g. ["com", "example", "foo"].
+func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
+ for len(domain) > 0 {
+ if i := strings.LastIndexByte(domain, '.'); i == -1 {
+ reverseLabels = append(reverseLabels, domain)
+ domain = ""
+ } else {
+ reverseLabels = append(reverseLabels, domain[i+1:])
+ domain = domain[:i]
+ }
+ }
+
+ if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 {
+ // An empty label at the end indicates an absolute value.
+ return nil, false
+ }
+
+ for _, label := range reverseLabels {
+ if len(label) == 0 {
+ // Empty labels are otherwise invalid.
+ return nil, false
+ }
+
+ for _, c := range label {
+ if c < 33 || c > 126 {
+ // Invalid character.
+ return nil, false
+ }
+ }
+ }
+
+ return reverseLabels, true
+}
+
+func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) {
+ // If the constraint contains an @, then it specifies an exact mailbox
+ // name.
+ if strings.Contains(constraint, "@") {
+ constraintMailbox, ok := parseRFC2821Mailbox(constraint)
+ if !ok {
+ return false, fmt.Errorf("x509: internal error: cannot parse constraint %q", constraint)
+ }
+ return mailbox.local == constraintMailbox.local && strings.EqualFold(mailbox.domain, constraintMailbox.domain), nil
+ }
+
+	// Otherwise the constraint is treated like a DNS constraint on the
+	// domain part of the mailbox.
+ return matchDomainConstraint(mailbox.domain, constraint)
+}
+
+func matchURIConstraint(uri *url.URL, constraint string) (bool, error) {
+ // From RFC 5280, Section 4.2.1.10:
+ // “a uniformResourceIdentifier that does not include an authority
+ // component with a host name specified as a fully qualified domain
+ // name (e.g., if the URI either does not include an authority
+ // component or includes an authority component in which the host name
+ // is specified as an IP address), then the application MUST reject the
+ // certificate.”
+
+ host := uri.Host
+ if len(host) == 0 {
+ return false, fmt.Errorf("URI with empty host (%q) cannot be matched against constraints", uri.String())
+ }
+
+ if strings.Contains(host, ":") && !strings.HasSuffix(host, "]") {
+ var err error
+ host, _, err = net.SplitHostPort(uri.Host)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") ||
+ net.ParseIP(host) != nil {
+ return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String())
+ }
+
+ return matchDomainConstraint(host, constraint)
+}
+
+func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) {
+ if len(ip) != len(constraint.IP) {
+ return false, nil
+ }
+
+ for i := range ip {
+ if mask := constraint.Mask[i]; ip[i]&mask != constraint.IP[i]&mask {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
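+// A few hedged illustrations of matchDomainConstraint's semantics, derived
+// from the rules implemented below rather than from any authoritative spec:
+//
+//	matchDomainConstraint("mail.example.com", "example.com") // true: matches the domain and its subdomains
+//	matchDomainConstraint("example.com", ".example.com")     // false: a leading dot demands at least one extra label
+//	matchDomainConstraint("anything.at.all", "")             // true: empty constraints match everything (as in NSS)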
+func matchDomainConstraint(domain, constraint string) (bool, error) {
+ // The meaning of zero length constraints is not specified, but this
+ // code follows NSS and accepts them as matching everything.
+ if len(constraint) == 0 {
+ return true, nil
+ }
+
+ domainLabels, ok := domainToReverseLabels(domain)
+ if !ok {
+ return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain)
+ }
+
+ // RFC 5280 says that a leading period in a domain name means that at
+ // least one label must be prepended, but only for URI and email
+ // constraints, not DNS constraints. The code also supports that
+ // behaviour for DNS constraints.
+
+ mustHaveSubdomains := false
+ if constraint[0] == '.' {
+ mustHaveSubdomains = true
+ constraint = constraint[1:]
+ }
+
+ constraintLabels, ok := domainToReverseLabels(constraint)
+ if !ok {
+ return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint)
+ }
+
+ if len(domainLabels) < len(constraintLabels) ||
+ (mustHaveSubdomains && len(domainLabels) == len(constraintLabels)) {
+ return false, nil
+ }
+
+ for i, constraintLabel := range constraintLabels {
+ if !strings.EqualFold(constraintLabel, domainLabels[i]) {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// checkNameConstraints checks that c permits a child certificate to claim the
+// given name, of type nameType. The argument parsedName contains the parsed
+// form of name, suitable for passing to the match function. The total number
+// of comparisons is tracked in the given count and should not exceed the given
+// limit.
+func (c *Certificate) checkNameConstraints(count *int,
+ maxConstraintComparisons int,
+ nameType string,
+ name string,
+ parsedName interface{},
+ match func(parsedName, constraint interface{}) (match bool, err error),
+ permitted, excluded interface{}) error {
+
+ excludedValue := reflect.ValueOf(excluded)
+
+ *count += excludedValue.Len()
+ if *count > maxConstraintComparisons {
+ return CertificateInvalidError{c, TooManyConstraints, ""}
+ }
+
+ for i := 0; i < excludedValue.Len(); i++ {
+ constraint := excludedValue.Index(i).Interface()
+ match, err := match(parsedName, constraint)
+ if err != nil {
+ return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
+ }
+
+ if match {
+ return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is excluded by constraint %q", nameType, name, constraint)}
+ }
+ }
+
+ permittedValue := reflect.ValueOf(permitted)
+
+ *count += permittedValue.Len()
+ if *count > maxConstraintComparisons {
+ return CertificateInvalidError{c, TooManyConstraints, ""}
+ }
+
+ ok := true
+ for i := 0; i < permittedValue.Len(); i++ {
+ constraint := permittedValue.Index(i).Interface()
+
+ var err error
+ if ok, err = match(parsedName, constraint); err != nil {
+ return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
+ }
+
+ if ok {
+ break
+ }
+ }
+
+ if !ok {
+ return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is not permitted by any constraint", nameType, name)}
+ }
+
+ return nil
+}
+
+// isValid performs validity checks on c given that it is a candidate to append
+// to the chain in currentChain.
+func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
+ if !opts.DisableCriticalExtensionChecks && len(c.UnhandledCriticalExtensions) > 0 {
+ return UnhandledCriticalExtension{ID: c.UnhandledCriticalExtensions[0]}
+ }
+
+ if !opts.DisableNameChecks && len(currentChain) > 0 {
+ child := currentChain[len(currentChain)-1]
+ if !bytes.Equal(child.RawIssuer, c.RawSubject) {
+ return CertificateInvalidError{c, NameMismatch, ""}
+ }
+ }
+
+ if !opts.DisableTimeChecks {
+ now := opts.CurrentTime
+ if now.IsZero() {
+ now = time.Now()
+ }
+ if now.Before(c.NotBefore) {
+ return CertificateInvalidError{
+ Cert: c,
+ Reason: Expired,
+ Detail: fmt.Sprintf("current time %s is before %s", now.Format(time.RFC3339), c.NotBefore.Format(time.RFC3339)),
+ }
+ } else if now.After(c.NotAfter) {
+ return CertificateInvalidError{
+ Cert: c,
+ Reason: Expired,
+ Detail: fmt.Sprintf("current time %s is after %s", now.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)),
+ }
+ }
+ }
+
+ maxConstraintComparisons := opts.MaxConstraintComparisions
+ if maxConstraintComparisons == 0 {
+ maxConstraintComparisons = 250000
+ }
+ comparisonCount := 0
+
+ var leaf *Certificate
+ if certType == intermediateCertificate || certType == rootCertificate {
+ if len(currentChain) == 0 {
+ return errors.New("x509: internal error: empty chain when appending CA cert")
+ }
+ leaf = currentChain[0]
+ }
+
+ checkNameConstraints := !opts.DisableNameConstraintChecks && (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints()
+ if checkNameConstraints && leaf.commonNameAsHostname() {
+ // This is the deprecated, legacy case of depending on the commonName as
+ // a hostname. We don't enforce name constraints against the CN, but
+ // VerifyHostname will look for hostnames in there if there are no SANs.
+ // In order to ensure VerifyHostname will not accept an unchecked name,
+ // return an error here.
+ return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""}
+ } else if checkNameConstraints && leaf.hasSANExtension() {
+ err := forEachSAN(leaf.getSANExtension(), func(tag int, data []byte) error {
+ switch tag {
+ case nameTypeEmail:
+ name := string(data)
+ mailbox, ok := parseRFC2821Mailbox(name)
+ if !ok {
+					return fmt.Errorf("x509: cannot parse rfc822Name %q", name)
+ }
+
+ if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
+ func(parsedName, constraint interface{}) (bool, error) {
+ return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string))
+ }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil {
+ return err
+ }
+
+ case nameTypeDNS:
+ name := string(data)
+ if _, ok := domainToReverseLabels(name); !ok {
+ return fmt.Errorf("x509: cannot parse dnsName %q", name)
+ }
+
+ if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
+ func(parsedName, constraint interface{}) (bool, error) {
+ return matchDomainConstraint(parsedName.(string), constraint.(string))
+ }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil {
+ return err
+ }
+
+ case nameTypeURI:
+ name := string(data)
+ uri, err := url.Parse(name)
+ if err != nil {
+ return fmt.Errorf("x509: internal error: URI SAN %q failed to parse", name)
+ }
+
+ if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri,
+ func(parsedName, constraint interface{}) (bool, error) {
+ return matchURIConstraint(parsedName.(*url.URL), constraint.(string))
+ }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil {
+ return err
+ }
+
+ case nameTypeIP:
+ ip := net.IP(data)
+ if l := len(ip); l != net.IPv4len && l != net.IPv6len {
+ return fmt.Errorf("x509: internal error: IP SAN %x failed to parse", data)
+ }
+
+ if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip,
+ func(parsedName, constraint interface{}) (bool, error) {
+ return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet))
+ }, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil {
+ return err
+ }
+
+ default:
+ // Unknown SAN types are ignored.
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+ }
+
+ // KeyUsage status flags are ignored. From Engineering Security, Peter
+ // Gutmann: A European government CA marked its signing certificates as
+ // being valid for encryption only, but no-one noticed. Another
+ // European CA marked its signature keys as not being valid for
+ // signatures. A different CA marked its own trusted root certificate
+ // as being invalid for certificate signing. Another national CA
+ // distributed a certificate to be used to encrypt data for the
+ // country’s tax authority that was marked as only being usable for
+ // digital signatures but not for encryption. Yet another CA reversed
+ // the order of the bit flags in the keyUsage due to confusion over
+ // encoding endianness, essentially setting a random keyUsage in
+ // certificates that it issued. Another CA created a self-invalidating
+ // certificate by adding a certificate policy statement stipulating
+ // that the certificate had to be used strictly as specified in the
+ // keyUsage, and a keyUsage containing a flag indicating that the RSA
+ // encryption key could only be used for Diffie-Hellman key agreement.
+
+ if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) {
+ return CertificateInvalidError{c, NotAuthorizedToSign, ""}
+ }
+
+ if !opts.DisablePathLenChecks && c.BasicConstraintsValid && c.MaxPathLen >= 0 {
+ numIntermediates := len(currentChain) - 1
+ if numIntermediates > c.MaxPathLen {
+ return CertificateInvalidError{c, TooManyIntermediates, ""}
+ }
+ }
+
+ return nil
+}
+
+// Verify attempts to verify c by building one or more chains from c to a
+// certificate in opts.Roots, using certificates in opts.Intermediates if
+// needed. If successful, it returns one or more chains where the first
+// element of the chain is c and the last element is from opts.Roots.
+//
+// If opts.Roots is nil and system roots are unavailable the returned error
+// will be of type SystemRootsError.
+//
+// Name constraints in the intermediates will be applied to all names claimed
+// in the chain, not just opts.DNSName. Thus it is invalid for a leaf to claim
+// example.com if an intermediate doesn't permit it, even if example.com is not
+// the name being validated. Note that DirectoryName constraints are not
+// supported.
+//
+// Extended Key Usage values are enforced down a chain, so an intermediate or
+// root that enumerates EKUs prevents a leaf from asserting an EKU not in that
+// list.
+//
+// WARNING: this function doesn't do any revocation checking.
+func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
+ // Platform-specific verification needs the ASN.1 contents so
+ // this makes the behavior consistent across platforms.
+ if len(c.Raw) == 0 {
+ return nil, errNotParsed
+ }
+ if opts.Intermediates != nil {
+ for _, intermediate := range opts.Intermediates.certs {
+ if len(intermediate.Raw) == 0 {
+ return nil, errNotParsed
+ }
+ }
+ }
+
+ // Use Windows's own verification and chain building.
+ if opts.Roots == nil && runtime.GOOS == "windows" {
+ return c.systemVerify(&opts)
+ }
+
+ if opts.Roots == nil {
+ opts.Roots = systemRootsPool()
+ if opts.Roots == nil {
+ return nil, SystemRootsError{systemRootsErr}
+ }
+ }
+
+ err = c.isValid(leafCertificate, nil, &opts)
+ if err != nil {
+ return
+ }
+
+ if len(opts.DNSName) > 0 {
+ err = c.VerifyHostname(opts.DNSName)
+ if err != nil {
+ return
+ }
+ }
+
+ var candidateChains [][]*Certificate
+ if opts.Roots.contains(c) {
+ candidateChains = append(candidateChains, []*Certificate{c})
+ } else {
+ if candidateChains, err = c.buildChains(nil, []*Certificate{c}, nil, &opts); err != nil {
+ return nil, err
+ }
+ }
+
+ keyUsages := opts.KeyUsages
+ if len(keyUsages) == 0 {
+ keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
+ }
+
+ // If any key usage is acceptable then we're done.
+ for _, usage := range keyUsages {
+ if usage == ExtKeyUsageAny {
+ return candidateChains, nil
+ }
+ }
+
+ for _, candidate := range candidateChains {
+ if opts.DisableEKUChecks || checkChainForKeyUsage(candidate, keyUsages) {
+ chains = append(chains, candidate)
+ }
+ }
+
+ if len(chains) == 0 {
+ return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
+ }
+
+ return chains, nil
+}
+
+func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
+ n := make([]*Certificate, len(chain)+1)
+ copy(n, chain)
+ n[len(chain)] = cert
+ return n
+}
+
+// maxChainSignatureChecks is the maximum number of CheckSignatureFrom calls
+// that an invocation of buildChains will (transitively) make. Most chains are
+// less than 15 certificates long, so this leaves space for multiple chains and
+// for failed checks due to different intermediates having the same Subject.
+const maxChainSignatureChecks = 100
+
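+// buildChains recursively extends currentChain towards a certificate in
+// opts.Roots, memoizing per-intermediate results in cache and bounding the
+// total number of signature checks via sigChecks (see
+// maxChainSignatureChecks above).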
+func (c *Certificate) buildChains(cache map[*Certificate][][]*Certificate, currentChain []*Certificate, sigChecks *int, opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ var (
+ hintErr error
+ hintCert *Certificate
+ )
+
+ considerCandidate := func(certType int, candidate *Certificate) {
+ for _, cert := range currentChain {
+ if cert.Equal(candidate) {
+ return
+ }
+ }
+
+ if sigChecks == nil {
+ sigChecks = new(int)
+ }
+ *sigChecks++
+ if *sigChecks > maxChainSignatureChecks {
+ err = errors.New("x509: signature check attempts limit reached while verifying certificate chain")
+ return
+ }
+
+ if err := c.CheckSignatureFrom(candidate); err != nil {
+ if hintErr == nil {
+ hintErr = err
+ hintCert = candidate
+ }
+ return
+ }
+
+ err = candidate.isValid(certType, currentChain, opts)
+ if err != nil {
+ return
+ }
+
+ switch certType {
+ case rootCertificate:
+ chains = append(chains, appendToFreshChain(currentChain, candidate))
+ case intermediateCertificate:
+ if cache == nil {
+ cache = make(map[*Certificate][][]*Certificate)
+ }
+ childChains, ok := cache[candidate]
+ if !ok {
+ childChains, err = candidate.buildChains(cache, appendToFreshChain(currentChain, candidate), sigChecks, opts)
+ cache[candidate] = childChains
+ }
+ chains = append(chains, childChains...)
+ }
+ }
+
+ for _, rootNum := range opts.Roots.findPotentialParents(c) {
+ considerCandidate(rootCertificate, opts.Roots.certs[rootNum])
+ }
+ for _, intermediateNum := range opts.Intermediates.findPotentialParents(c) {
+ considerCandidate(intermediateCertificate, opts.Intermediates.certs[intermediateNum])
+ }
+
+ if len(chains) > 0 {
+ err = nil
+ }
+ if len(chains) == 0 && err == nil {
+ err = UnknownAuthorityError{c, hintErr, hintCert}
+ }
+
+ return
+}
+
+// validHostname reports whether host is a valid hostname that can be matched or
+// matched against according to RFC 6125 2.2, with some leniency to accommodate
+// legacy values.
+func validHostname(host string) bool {
+ host = strings.TrimSuffix(host, ".")
+
+ if len(host) == 0 {
+ return false
+ }
+
+ for i, part := range strings.Split(host, ".") {
+ if part == "" {
+ // Empty label.
+ return false
+ }
+ if i == 0 && part == "*" {
+ // Only allow full left-most wildcards, as those are the only ones
+ // we match, and matching literal '*' characters is probably never
+ // the expected behavior.
+ continue
+ }
+ for j, c := range part {
+ if 'a' <= c && c <= 'z' {
+ continue
+ }
+ if '0' <= c && c <= '9' {
+ continue
+ }
+ if 'A' <= c && c <= 'Z' {
+ continue
+ }
+ if c == '-' && j != 0 {
+ continue
+ }
+ if c == '_' || c == ':' {
+ // Not valid characters in hostnames, but commonly
+ // found in deployments outside the WebPKI.
+ continue
+ }
+ return false
+ }
+ }
+
+ return true
+}
+
+// commonNameAsHostname reports whether the Common Name field should be
+// considered the hostname that the certificate is valid for. This is a legacy
+// behavior, disabled if the Subject Alt Name extension is present.
+//
+// It applies the strict validHostname check to the Common Name field, so that
+// certificates without SANs can still be validated against CAs with name
+// constraints if there is no risk the CN would be matched as a hostname.
+// See NameConstraintsWithoutSANs and issue 24151.
+func (c *Certificate) commonNameAsHostname() bool {
+ return !ignoreCN && !c.hasSANExtension() && validHostname(c.Subject.CommonName)
+}
+
+func matchHostnames(pattern, host string) bool {
+ host = strings.TrimSuffix(host, ".")
+ pattern = strings.TrimSuffix(pattern, ".")
+
+ if len(pattern) == 0 || len(host) == 0 {
+ return false
+ }
+
+ patternParts := strings.Split(pattern, ".")
+ hostParts := strings.Split(host, ".")
+
+ if len(patternParts) != len(hostParts) {
+ return false
+ }
+
+ for i, patternPart := range patternParts {
+ if i == 0 && patternPart == "*" {
+ continue
+ }
+ if patternPart != hostParts[i] {
+ return false
+ }
+ }
+
+ return true
+}
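+
+// exampleMatchHostnames is a hypothetical sketch (not part of the upstream
+// file): a "*" pattern label matches exactly one host label, and a single
+// trailing dot is ignored on both sides.
+func exampleMatchHostnames() []bool {
+ return []bool{
+ matchHostnames("*.example.com", "a.example.com"), // true
+ matchHostnames("*.example.com", "a.b.example.com"), // false: label counts differ
+ matchHostnames("example.com", "example.com."), // true: trailing dot stripped
+ }
+}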
+
+// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use
+// an explicitly ASCII function to avoid any sharp corners resulting from
+// performing Unicode operations on DNS labels.
+func toLowerCaseASCII(in string) string {
+ // If the string is already lower-case then there's nothing to do.
+ isAlreadyLowerCase := true
+ for _, c := range in {
+ if c == utf8.RuneError {
+ // If we get a UTF-8 error then there might be
+ // upper-case ASCII bytes in the invalid sequence.
+ isAlreadyLowerCase = false
+ break
+ }
+ if 'A' <= c && c <= 'Z' {
+ isAlreadyLowerCase = false
+ break
+ }
+ }
+
+ if isAlreadyLowerCase {
+ return in
+ }
+
+ out := []byte(in)
+ for i, c := range out {
+ if 'A' <= c && c <= 'Z' {
+ out[i] += 'a' - 'A'
+ }
+ }
+ return string(out)
+}
+
+// VerifyHostname returns nil if c is a valid certificate for the named host.
+// Otherwise it returns an error describing the mismatch.
+func (c *Certificate) VerifyHostname(h string) error {
+ // IP addresses may be written in [ ].
+ candidateIP := h
+ if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' {
+ candidateIP = h[1 : len(h)-1]
+ }
+ if ip := net.ParseIP(candidateIP); ip != nil {
+ // We only match IP addresses against IP SANs.
+ // See RFC 6125, Appendix B.2.
+ for _, candidate := range c.IPAddresses {
+ if ip.Equal(candidate) {
+ return nil
+ }
+ }
+ return HostnameError{c, candidateIP}
+ }
+
+ lowered := toLowerCaseASCII(h)
+
+ if c.commonNameAsHostname() {
+ if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
+ return nil
+ }
+ } else {
+ for _, match := range c.DNSNames {
+ if matchHostnames(toLowerCaseASCII(match), lowered) {
+ return nil
+ }
+ }
+ }
+
+ return HostnameError{c, h}
+}
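+
+// exampleVerifyHostname is a hypothetical sketch (not part of the upstream
+// file): IP literals, bracketed or not, are matched only against IP SANs,
+// while other names are matched against the DNS SANs (or the legacy CN).
+func exampleVerifyHostname(cert *Certificate) (ipOK, dnsOK bool) {
+ ipOK = cert.VerifyHostname("[2001:db8::1]") == nil
+ dnsOK = cert.VerifyHostname("www.example.com") == nil
+ return ipOK, dnsOK
+}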
+
+func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
+ usages := make([]ExtKeyUsage, len(keyUsages))
+ copy(usages, keyUsages)
+
+ if len(chain) == 0 {
+ return false
+ }
+
+ usagesRemaining := len(usages)
+
+ // We walk down the list and cross out any usages that aren't supported
+ // by each certificate. If we cross out all the usages, then the chain
+ // is unacceptable.
+
+NextCert:
+ for i := len(chain) - 1; i >= 0; i-- {
+ cert := chain[i]
+ if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
+ // The certificate doesn't have any extended key usage specified.
+ continue
+ }
+
+ for _, usage := range cert.ExtKeyUsage {
+ if usage == ExtKeyUsageAny {
+ // The certificate is explicitly good for any usage.
+ continue NextCert
+ }
+ }
+
+ const invalidUsage ExtKeyUsage = -1
+
+ NextRequestedUsage:
+ for i, requestedUsage := range usages {
+ if requestedUsage == invalidUsage {
+ continue
+ }
+
+ for _, usage := range cert.ExtKeyUsage {
+ if requestedUsage == usage {
+ continue NextRequestedUsage
+ } else if requestedUsage == ExtKeyUsageServerAuth &&
+ (usage == ExtKeyUsageNetscapeServerGatedCrypto ||
+ usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
+ // In order to support COMODO
+ // certificate chains, we have to
+ // accept Netscape or Microsoft SGC
+ // usages as equal to ServerAuth.
+ continue NextRequestedUsage
+ }
+ }
+
+ usages[i] = invalidUsage
+ usagesRemaining--
+ if usagesRemaining == 0 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
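+
+// exampleServerAuthChain is a hypothetical sketch (not part of the upstream
+// file): the walk above runs root-to-leaf, crossing out requested usages a
+// certificate does not carry, so a chain is acceptable only if some usage
+// survives every certificate that declares EKUs.
+func exampleServerAuthChain(chain []*Certificate) bool {
+ return checkChainForKeyUsage(chain, []ExtKeyUsage{ExtKeyUsageServerAuth})
+}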
diff --git a/vendor/github.com/google/certificate-transparency-go/x509/x509.go b/vendor/github.com/google/certificate-transparency-go/x509/x509.go
new file mode 100644
index 00000000000..917d78779f3
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509/x509.go
@@ -0,0 +1,3307 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package x509 parses X.509-encoded keys and certificates.
+//
+// On UNIX systems the environment variables SSL_CERT_FILE and SSL_CERT_DIR
+// can be used to override the system default locations for the SSL certificate
+// file and SSL certificate files directory, respectively.
+//
+// This is a fork of the Go library crypto/x509 package, primarily adapted for
+// use with Certificate Transparency. Main areas of difference are:
+//
+// Life as a fork:
+// - Rename OS-specific cgo code so it doesn't clash with main Go library.
+// - Use local library imports (asn1, pkix) throughout.
+// - Add version-specific wrappers for Go version-incompatible code (in
+// ptr_*_windows.go).
+// Laxer certificate parsing:
+// - Add options to disable various validation checks (times, EKUs etc).
+// - Use NonFatalErrors type for some errors and continue parsing; this
+// can be checked with IsFatal(err).
+// - Support for short bitlength ECDSA curves (in curves.go).
+// Certificate Transparency specific functions:
+// - Parsing and marshaling of SCTList extension.
+// - RemoveSCTList() function for rebuilding CT leaf entry.
+// - Pre-certificate processing (RemoveCTPoison(), BuildPrecertTBS(),
+// ParseTBSCertificate(), IsPrecertificate()).
+// Revocation list processing:
+// - Detailed CRL parsing (in revoked.go)
+// - Detailed error recording mechanism (in error.go, errors.go)
+// - Factor out parseDistributionPoints() for reuse.
+// - Factor out and generalize GeneralNames parsing (in names.go)
+// - Fix CRL commenting.
+// RPKI support:
+// - Support for SubjectInfoAccess extension
+// - Support for RFC3779 extensions (in rpki.go)
+// RSAES-OAEP support:
+// - Support for parsing RSAES-OAEP public keys from certificates
+// Ed25519 support:
+// - Support for parsing and marshaling Ed25519 keys
+// General improvements:
+// - Export and use OID values throughout.
+// - Export OIDFromNamedCurve().
+// - Export SignatureAlgorithmFromAI().
+// - Add OID value to UnhandledCriticalExtension error.
+// - Minor typo/lint fixes.
+package x509
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ _ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "golang.org/x/crypto/cryptobyte"
+ cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
+ "golang.org/x/crypto/ed25519"
+
+ "github.com/google/certificate-transparency-go/asn1"
+ "github.com/google/certificate-transparency-go/tls"
+ "github.com/google/certificate-transparency-go/x509/pkix"
+)
+
+// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
+// in RFC 3280.
+type pkixPublicKey struct {
+ Algo pkix.AlgorithmIdentifier
+ BitString asn1.BitString
+}
+
+// ParsePKIXPublicKey parses a public key in PKIX, ASN.1 DER form.
+//
+// It returns a *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, or
+// ed25519.PublicKey. More types might be supported in the future.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
+func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
+ var pki publicKeyInfo
+ if rest, err := asn1.Unmarshal(derBytes, &pki); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after ASN.1 of public-key")
+ }
+ algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm)
+ if algo == UnknownPublicKeyAlgorithm {
+ return nil, errors.New("x509: unknown public key algorithm")
+ }
+ var nfe NonFatalErrors
+ pub, err = parsePublicKey(algo, &pki, &nfe)
+ if err != nil {
+ return pub, err
+ }
+ // Treat non-fatal errors as fatal for this entrypoint.
+ if len(nfe.Errors) > 0 {
+ return nil, nfe.Errors[0]
+ }
+ return pub, nil
+}
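+
+// exampleParsePKIXPublicKey is a hypothetical sketch (not part of the
+// upstream file): keys of this form usually arrive in a "PUBLIC KEY" PEM
+// block, so the DER bytes are extracted with pem.Decode first.
+func exampleParsePKIXPublicKey(pemData []byte) (interface{}, error) {
+ block, _ := pem.Decode(pemData)
+ if block == nil || block.Type != "PUBLIC KEY" {
+ return nil, errors.New("no PUBLIC KEY PEM block found")
+ }
+ return ParsePKIXPublicKey(block.Bytes)
+}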
+
+func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{
+ N: pub.N,
+ E: pub.E,
+ })
+ if err != nil {
+ return nil, pkix.AlgorithmIdentifier{}, err
+ }
+ publicKeyAlgorithm.Algorithm = OIDPublicKeyRSA
+ // This is a NULL parameters value which is required by
+ // RFC 3279, Section 2.3.1.
+ publicKeyAlgorithm.Parameters = asn1.NullRawValue
+ case *ecdsa.PublicKey:
+ publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
+ oid, ok := OIDFromNamedCurve(pub.Curve)
+ if !ok {
+ return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve")
+ }
+ publicKeyAlgorithm.Algorithm = OIDPublicKeyECDSA
+ var paramBytes []byte
+ paramBytes, err = asn1.Marshal(oid)
+ if err != nil {
+ return
+ }
+ publicKeyAlgorithm.Parameters.FullBytes = paramBytes
+ case ed25519.PublicKey:
+ publicKeyBytes = pub
+ publicKeyAlgorithm.Algorithm = OIDPublicKeyEd25519
+ default:
+ return nil, pkix.AlgorithmIdentifier{}, fmt.Errorf("x509: unsupported public key type: %T", pub)
+ }
+
+ return publicKeyBytes, publicKeyAlgorithm, nil
+}
+
+// MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form.
+//
+// The following key types are currently supported: *rsa.PublicKey, *ecdsa.PublicKey
+// and ed25519.PublicKey. Unsupported key types result in an error.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
+func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) {
+ var publicKeyBytes []byte
+ var publicKeyAlgorithm pkix.AlgorithmIdentifier
+ var err error
+
+ if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil {
+ return nil, err
+ }
+
+ pkix := pkixPublicKey{
+ Algo: publicKeyAlgorithm,
+ BitString: asn1.BitString{
+ Bytes: publicKeyBytes,
+ BitLength: 8 * len(publicKeyBytes),
+ },
+ }
+
+ ret, _ := asn1.Marshal(pkix)
+ return ret, nil
+}
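+
+// exampleMarshalPKIXPublicKey is a hypothetical sketch (not part of the
+// upstream file) of the usual round trip: marshal the key to DER, then wrap
+// the result in a "PUBLIC KEY" PEM block.
+func exampleMarshalPKIXPublicKey(pub *ecdsa.PublicKey) ([]byte, error) {
+ der, err := MarshalPKIXPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ return pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der}), nil
+}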
+
+// These structures reflect the ASN.1 structure of X.509 certificates:
+
+type certificate struct {
+ Raw asn1.RawContent
+ TBSCertificate tbsCertificate
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+type tbsCertificate struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,explicit,default:0,tag:0"`
+ SerialNumber *big.Int
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ Issuer asn1.RawValue
+ Validity validity
+ Subject asn1.RawValue
+ PublicKey publicKeyInfo
+ UniqueId asn1.BitString `asn1:"optional,tag:1"`
+ SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"`
+ Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"`
+}
+
+// RFC 4055, 4.1
+// The current ASN.1 parser does not support non-integer defaults so
+// the 'default:' tags here do nothing.
+type rsaesoaepAlgorithmParameters struct {
+ HashFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:0,default:sha1Identifier"`
+ MaskgenFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:1,default:mgf1SHA1Identifier"`
+ PSourceFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:2,default:pSpecifiedEmptyIdentifier"`
+}
+
+type dsaAlgorithmParameters struct {
+ P, Q, G *big.Int
+}
+
+type dsaSignature struct {
+ R, S *big.Int
+}
+
+type ecdsaSignature dsaSignature
+
+type validity struct {
+ NotBefore, NotAfter time.Time
+}
+
+type publicKeyInfo struct {
+ Raw asn1.RawContent
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+}
+
+// RFC 5280, 4.2.1.1
+type authKeyId struct {
+ Id []byte `asn1:"optional,tag:0"`
+}
+
+// SignatureAlgorithm indicates the algorithm used to sign a certificate.
+type SignatureAlgorithm int
+
+// SignatureAlgorithm values:
+const (
+ UnknownSignatureAlgorithm SignatureAlgorithm = iota
+ MD2WithRSA
+ MD5WithRSA
+ SHA1WithRSA
+ SHA256WithRSA
+ SHA384WithRSA
+ SHA512WithRSA
+ DSAWithSHA1
+ DSAWithSHA256
+ ECDSAWithSHA1
+ ECDSAWithSHA256
+ ECDSAWithSHA384
+ ECDSAWithSHA512
+ SHA256WithRSAPSS
+ SHA384WithRSAPSS
+ SHA512WithRSAPSS
+ PureEd25519
+)
+
+// RFC 4055, 6. Basic object identifiers
+var oidpSpecified = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 9}
+
+// These are the default parameters for an RSAES-OAEP pubkey.
+// The current ASN.1 parser does not support non-integer defaults so
+// these currently do nothing.
+var (
+ sha1Identifier = pkix.AlgorithmIdentifier{
+ Algorithm: oidSHA1,
+ Parameters: asn1.NullRawValue,
+ }
+ mgf1SHA1Identifier = pkix.AlgorithmIdentifier{
+ Algorithm: oidMGF1,
+ // RFC 4055, 2.1 sha1Identifier
+ Parameters: asn1.RawValue{
+ Class: asn1.ClassUniversal,
+ Tag: asn1.TagSequence,
+ IsCompound: false,
+ Bytes: []byte{6, 5, 43, 14, 3, 2, 26, 5, 0},
+ FullBytes: []byte{16, 9, 6, 5, 43, 14, 3, 2, 26, 5, 0}},
+ }
+ pSpecifiedEmptyIdentifier = pkix.AlgorithmIdentifier{
+ Algorithm: oidpSpecified,
+ // RFC 4055, 4.1 nullOctetString
+ Parameters: asn1.RawValue{
+ Class: asn1.ClassUniversal,
+ Tag: asn1.TagOctetString,
+ IsCompound: false,
+ Bytes: []byte{},
+ FullBytes: []byte{4, 0}},
+ }
+)
+
+func (algo SignatureAlgorithm) isRSAPSS() bool {
+ switch algo {
+ case SHA256WithRSAPSS, SHA384WithRSAPSS, SHA512WithRSAPSS:
+ return true
+ default:
+ return false
+ }
+}
+
+func (algo SignatureAlgorithm) String() string {
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == algo {
+ return details.name
+ }
+ }
+ return strconv.Itoa(int(algo))
+}
+
+// PublicKeyAlgorithm indicates the algorithm used for a certificate's public key.
+type PublicKeyAlgorithm int
+
+// PublicKeyAlgorithm values:
+const (
+ UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota
+ RSA
+ DSA
+ ECDSA
+ Ed25519
+ RSAESOAEP
+)
+
+var publicKeyAlgoName = [...]string{
+ RSA: "RSA",
+ DSA: "DSA",
+ ECDSA: "ECDSA",
+ Ed25519: "Ed25519",
+ RSAESOAEP: "RSAESOAEP",
+}
+
+func (algo PublicKeyAlgorithm) String() string {
+ if 0 < algo && int(algo) < len(publicKeyAlgoName) {
+ return publicKeyAlgoName[algo]
+ }
+ return strconv.Itoa(int(algo))
+}
+
+// OIDs for signature algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 }
+//
+//
+// RFC 3279 2.2.1 RSA Signature Algorithms
+//
+// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
+//
+// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
+//
+// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
+//
+// dsaWithSha1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 }
+//
+// RFC 3279 2.2.3 ECDSA Signature Algorithm
+//
+// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-x962(10045)
+// signatures(4) ecdsa-with-SHA1(1)}
+//
+//
+// RFC 4055 5 PKCS #1 Version 1.5
+//
+// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 }
+//
+// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 }
+//
+// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 }
+//
+//
+// RFC 5758 3.1 DSA Signature Algorithms
+//
+// dsaWithSha256 OBJECT IDENTIFIER ::= {
+// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+// csor(3) algorithms(4) id-dsa-with-sha2(3) 2}
+//
+// RFC 5758 3.2 ECDSA Signature Algorithm
+//
+// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 }
+//
+// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 }
+//
+// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 }
+//
+//
+// RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers
+//
+// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 }
+
+var (
+ oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+ oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+ oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+ oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10}
+ oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+ oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
+ oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+ oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+ oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+ oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+ oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112}
+
+ oidSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
+ oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
+ oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
+ oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
+
+ oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8}
+
+ // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA
+ // but it's specified by ISO. Microsoft's makecert.exe has been known
+ // to produce certificates with this OID.
+ oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29}
+)
+
+var signatureAlgorithmDetails = []struct {
+ algo SignatureAlgorithm
+ name string
+ oid asn1.ObjectIdentifier
+ pubKeyAlgo PublicKeyAlgorithm
+ hash crypto.Hash
+}{
+ {MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, RSA, crypto.Hash(0) /* no value for MD2 */},
+ {MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, RSA, crypto.MD5},
+ {SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, RSA, crypto.SHA1},
+ {SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, RSA, crypto.SHA1},
+ {SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, RSA, crypto.SHA256},
+ {SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, RSA, crypto.SHA384},
+ {SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, RSA, crypto.SHA512},
+ {SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA256},
+ {SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA384},
+ {SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA512},
+ {DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, DSA, crypto.SHA1},
+ {DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, DSA, crypto.SHA256},
+ {ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, ECDSA, crypto.SHA1},
+ {ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, ECDSA, crypto.SHA256},
+ {ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, ECDSA, crypto.SHA384},
+ {ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, ECDSA, crypto.SHA512},
+ {PureEd25519, "Ed25519", oidSignatureEd25519, Ed25519, crypto.Hash(0) /* no pre-hashing */},
+}
+
+// pssParameters reflects the parameters in an AlgorithmIdentifier that
+// specifies RSA PSS. See RFC 3447, Appendix A.2.3.
+type pssParameters struct {
+ // The following three fields are not marked as
+ // optional because the default values specify SHA-1,
+ // which is no longer suitable for use in signatures.
+ Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"`
+ MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"`
+ SaltLength int `asn1:"explicit,tag:2"`
+ TrailerField int `asn1:"optional,explicit,tag:3,default:1"`
+}
+
+// rsaPSSParameters returns an asn1.RawValue suitable for use as the Parameters
+// in an AlgorithmIdentifier that specifies RSA PSS.
+func rsaPSSParameters(hashFunc crypto.Hash) asn1.RawValue {
+ var hashOID asn1.ObjectIdentifier
+
+ switch hashFunc {
+ case crypto.SHA256:
+ hashOID = oidSHA256
+ case crypto.SHA384:
+ hashOID = oidSHA384
+ case crypto.SHA512:
+ hashOID = oidSHA512
+ }
+
+ params := pssParameters{
+ Hash: pkix.AlgorithmIdentifier{
+ Algorithm: hashOID,
+ Parameters: asn1.NullRawValue,
+ },
+ MGF: pkix.AlgorithmIdentifier{
+ Algorithm: oidMGF1,
+ },
+ SaltLength: hashFunc.Size(),
+ TrailerField: 1,
+ }
+
+ mgf1Params := pkix.AlgorithmIdentifier{
+ Algorithm: hashOID,
+ Parameters: asn1.NullRawValue,
+ }
+
+ var err error
+ params.MGF.Parameters.FullBytes, err = asn1.Marshal(mgf1Params)
+ if err != nil {
+ panic(err)
+ }
+
+ serialized, err := asn1.Marshal(params)
+ if err != nil {
+ panic(err)
+ }
+
+ return asn1.RawValue{FullBytes: serialized}
+}
+
+// SignatureAlgorithmFromAI converts a PKIX algorithm identifier to the
+// equivalent local constant.
+func SignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
+ if ai.Algorithm.Equal(oidSignatureEd25519) {
+ // RFC 8410, Section 3
+ // > For all of the OIDs, the parameters MUST be absent.
+ if len(ai.Parameters.FullBytes) != 0 {
+ return UnknownSignatureAlgorithm
+ }
+ }
+
+ if !ai.Algorithm.Equal(oidSignatureRSAPSS) {
+ for _, details := range signatureAlgorithmDetails {
+ if ai.Algorithm.Equal(details.oid) {
+ return details.algo
+ }
+ }
+ return UnknownSignatureAlgorithm
+ }
+
+ // RSA PSS is special because it encodes important parameters
+ // in the Parameters.
+
+ var params pssParameters
+ if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, &params); err != nil {
+ return UnknownSignatureAlgorithm
+ }
+
+ var mgf1HashFunc pkix.AlgorithmIdentifier
+ if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil {
+ return UnknownSignatureAlgorithm
+ }
+
+ // PSS is greatly overburdened with options. This code forces them into
+ // three buckets by requiring that the MGF1 hash function always match the
+ // message hash function (as recommended in RFC 3447, Section 8.1), that the
+ // salt length matches the hash length, and that the trailer field has the
+ // default value.
+ if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) ||
+ !params.MGF.Algorithm.Equal(oidMGF1) ||
+ !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
+ (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) ||
+ params.TrailerField != 1 {
+ return UnknownSignatureAlgorithm
+ }
+
+ switch {
+ case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32:
+ return SHA256WithRSAPSS
+ case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48:
+ return SHA384WithRSAPSS
+ case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64:
+ return SHA512WithRSAPSS
+ }
+
+ return UnknownSignatureAlgorithm
+}
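+
+// exampleSignatureAlgorithmFromAI is a hypothetical sketch (not part of the
+// upstream file): for non-PSS algorithms the OID alone determines the
+// result, whereas RSA-PSS identifiers are disambiguated by their parameters.
+func exampleSignatureAlgorithmFromAI() bool {
+ ai := pkix.AlgorithmIdentifier{Algorithm: oidSignatureSHA256WithRSA}
+ return SignatureAlgorithmFromAI(ai) == SHA256WithRSA
+}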
+
+// RFC 3279, 2.3 Public Key Algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
+//
+// rsadsi(113549) pkcs(1) 1 }
+//
+// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 }
+//
+// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
+//
+// x9-57(10040) x9cm(4) 1 }
+//
+// # RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
+//
+// id-ecPublicKey OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
+var (
+ OIDPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+ OIDPublicKeyRSAESOAEP = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 7}
+ OIDPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
+ OIDPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
+ OIDPublicKeyRSAObsolete = asn1.ObjectIdentifier{2, 5, 8, 1, 1}
+ OIDPublicKeyEd25519 = oidSignatureEd25519
+)
+
+func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
+ switch {
+ case oid.Equal(OIDPublicKeyRSA):
+ return RSA
+ case oid.Equal(OIDPublicKeyDSA):
+ return DSA
+ case oid.Equal(OIDPublicKeyECDSA):
+ return ECDSA
+ case oid.Equal(OIDPublicKeyRSAESOAEP):
+ return RSAESOAEP
+ case oid.Equal(OIDPublicKeyEd25519):
+ return Ed25519
+ }
+ return UnknownPublicKeyAlgorithm
+}
+
+// RFC 5480, 2.1.1.1. Named Curve
+//
+// secp224r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 33 }
+//
+// secp256r1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
+// prime(1) 7 }
+//
+// secp384r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 34 }
+//
+// secp521r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 35 }
+//
+// secp192r1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
+// prime(1) 1 }
+//
+// NB: secp256r1 is equivalent to prime256v1,
+// secp192r1 is equivalent to ansix9p192r and prime192v1
+var (
+ OIDNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
+ OIDNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
+ OIDNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
+ OIDNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
+ OIDNamedCurveP192 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 1}
+)
+
+func namedCurveFromOID(oid asn1.ObjectIdentifier, nfe *NonFatalErrors) elliptic.Curve {
+ switch {
+ case oid.Equal(OIDNamedCurveP224):
+ return elliptic.P224()
+ case oid.Equal(OIDNamedCurveP256):
+ return elliptic.P256()
+ case oid.Equal(OIDNamedCurveP384):
+ return elliptic.P384()
+ case oid.Equal(OIDNamedCurveP521):
+ return elliptic.P521()
+ case oid.Equal(OIDNamedCurveP192):
+ nfe.AddError(errors.New("insecure curve (secp192r1) specified"))
+ return secp192r1()
+ }
+ return nil
+}
+
+// OIDFromNamedCurve returns the OID used to specify the use of the given
+// elliptic curve.
+func OIDFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
+ switch curve {
+ case elliptic.P224():
+ return OIDNamedCurveP224, true
+ case elliptic.P256():
+ return OIDNamedCurveP256, true
+ case elliptic.P384():
+ return OIDNamedCurveP384, true
+ case elliptic.P521():
+ return OIDNamedCurveP521, true
+ case secp192r1():
+ return OIDNamedCurveP192, true
+ }
+
+ return nil, false
+}
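+
+// exampleCurveOIDRoundTrip is a hypothetical sketch (not part of the
+// upstream file): OIDFromNamedCurve and namedCurveFromOID are inverses for
+// the supported curves, and only secp192r1 records a non-fatal error.
+func exampleCurveOIDRoundTrip() bool {
+ oid, ok := OIDFromNamedCurve(elliptic.P256())
+ if !ok {
+ return false
+ }
+ var nfe NonFatalErrors
+ return namedCurveFromOID(oid, &nfe) == elliptic.P256() && !nfe.HasError()
+}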
+
+// KeyUsage represents the set of actions that are valid for a given key. It's
+// a bitmap of the KeyUsage* constants.
+type KeyUsage int
+
+// KeyUsage values:
+const (
+ KeyUsageDigitalSignature KeyUsage = 1 << iota
+ KeyUsageContentCommitment
+ KeyUsageKeyEncipherment
+ KeyUsageDataEncipherment
+ KeyUsageKeyAgreement
+ KeyUsageCertSign
+ KeyUsageCRLSign
+ KeyUsageEncipherOnly
+ KeyUsageDecipherOnly
+)
+
+// RFC 5280, 4.2.1.12 Extended Key Usage
+//
+// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 }
+//
+// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
+//
+// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
+// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
+// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 }
+// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 }
+// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 }
+// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
+var (
+ oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0}
+ oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1}
+ oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2}
+ oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3}
+ oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4}
+ oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5}
+ oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6}
+ oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7}
+ oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8}
+ oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9}
+ oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3}
+ oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1}
+ oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22}
+ oidExtKeyUsageMicrosoftKernelCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1}
+ // RFC 6962 s3.1
+ oidExtKeyUsageCertificateTransparency = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 4}
+)
+
+// ExtKeyUsage represents an extended set of actions that are valid for a given key.
+// Each of the ExtKeyUsage* constants define a unique action.
+type ExtKeyUsage int
+
+// ExtKeyUsage values:
+const (
+ ExtKeyUsageAny ExtKeyUsage = iota
+ ExtKeyUsageServerAuth
+ ExtKeyUsageClientAuth
+ ExtKeyUsageCodeSigning
+ ExtKeyUsageEmailProtection
+ ExtKeyUsageIPSECEndSystem
+ ExtKeyUsageIPSECTunnel
+ ExtKeyUsageIPSECUser
+ ExtKeyUsageTimeStamping
+ ExtKeyUsageOCSPSigning
+ ExtKeyUsageMicrosoftServerGatedCrypto
+ ExtKeyUsageNetscapeServerGatedCrypto
+ ExtKeyUsageMicrosoftCommercialCodeSigning
+ ExtKeyUsageMicrosoftKernelCodeSigning
+ ExtKeyUsageCertificateTransparency
+)
+
+// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID.
+var extKeyUsageOIDs = []struct {
+ extKeyUsage ExtKeyUsage
+ oid asn1.ObjectIdentifier
+}{
+ {ExtKeyUsageAny, oidExtKeyUsageAny},
+ {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth},
+ {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth},
+ {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning},
+ {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection},
+ {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem},
+ {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel},
+ {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser},
+ {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping},
+ {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning},
+ {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto},
+ {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto},
+ {ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning},
+ {ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning},
+ {ExtKeyUsageCertificateTransparency, oidExtKeyUsageCertificateTransparency},
+}
+
+func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) {
+ for _, pair := range extKeyUsageOIDs {
+ if oid.Equal(pair.oid) {
+ return pair.extKeyUsage, true
+ }
+ }
+ return
+}
+
+func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) {
+ for _, pair := range extKeyUsageOIDs {
+ if eku == pair.extKeyUsage {
+ return pair.oid, true
+ }
+ }
+ return
+}
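+
+// exampleEKURoundTrip is a hypothetical sketch (not part of the upstream
+// file): the extKeyUsageOIDs table makes the two lookups exact inverses.
+func exampleEKURoundTrip() bool {
+ oid, ok := oidFromExtKeyUsage(ExtKeyUsageServerAuth)
+ if !ok {
+ return false
+ }
+ eku, ok := extKeyUsageFromOID(oid)
+ return ok && eku == ExtKeyUsageServerAuth
+}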
+
+// SerializedSCT represents a single TLS-encoded signed certificate timestamp, from RFC6962 s3.3.
+type SerializedSCT struct {
+ Val []byte `tls:"minlen:1,maxlen:65535"`
+}
+
+// SignedCertificateTimestampList is a list of signed certificate timestamps, from RFC6962 s3.3.
+type SignedCertificateTimestampList struct {
+ SCTList []SerializedSCT `tls:"minlen:1,maxlen:65535"`
+}
+
+// A Certificate represents an X.509 certificate.
+type Certificate struct {
+ Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
+ RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content.
+ RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
+ RawSubject []byte // DER encoded Subject
+ RawIssuer []byte // DER encoded Issuer
+
+ Signature []byte
+ SignatureAlgorithm SignatureAlgorithm
+
+ PublicKeyAlgorithm PublicKeyAlgorithm
+ PublicKey interface{}
+
+ Version int
+ SerialNumber *big.Int
+ Issuer pkix.Name
+ Subject pkix.Name
+ NotBefore, NotAfter time.Time // Validity bounds.
+ KeyUsage KeyUsage
+
+ // Extensions contains raw X.509 extensions. When parsing certificates,
+ // this can be used to extract non-critical extensions that are not
+ // parsed by this package. When marshaling certificates, the Extensions
+ // field is ignored, see ExtraExtensions.
+ Extensions []pkix.Extension
+
+ // ExtraExtensions contains extensions to be copied, raw, into any
+ // marshaled certificates. Values override any extensions that would
+ // otherwise be produced based on the other fields. The ExtraExtensions
+ // field is not populated when parsing certificates, see Extensions.
+ ExtraExtensions []pkix.Extension
+
+ // UnhandledCriticalExtensions contains a list of extension IDs that
+ // were not (fully) processed when parsing. Verify will fail if this
+ // slice is non-empty, unless verification is delegated to an OS
+ // library which understands all the critical extensions.
+ //
+ // Users can access these extensions using Extensions and can remove
+ // elements from this slice if they believe that they have been
+ // handled.
+ UnhandledCriticalExtensions []asn1.ObjectIdentifier
+
+ ExtKeyUsage []ExtKeyUsage // Sequence of extended key usages.
+ UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package.
+
+ // BasicConstraintsValid indicates whether IsCA, MaxPathLen,
+ // and MaxPathLenZero are valid.
+ BasicConstraintsValid bool
+ IsCA bool
+
+ // MaxPathLen and MaxPathLenZero indicate the presence and
+ // value of the BasicConstraints' "pathLenConstraint".
+ //
+ // When parsing a certificate, a positive non-zero MaxPathLen
+ // means that the field was specified, -1 means it was unset,
+ // and MaxPathLenZero being true means that the field was
+ // explicitly set to zero. The case of MaxPathLen==0 with MaxPathLenZero==false
+ // should be treated equivalent to -1 (unset).
+ //
+ // When generating a certificate, an unset pathLenConstraint
+ // can be requested with either MaxPathLen == -1 or using the
+ // zero value for both MaxPathLen and MaxPathLenZero.
+ MaxPathLen int
+ // MaxPathLenZero indicates that BasicConstraintsValid==true
+ // and MaxPathLen==0 should be interpreted as an actual
+ // maximum path length of zero. Otherwise, that combination is
+ // interpreted as MaxPathLen not being set.
+ MaxPathLenZero bool
+
+ SubjectKeyId []byte
+ AuthorityKeyId []byte
+
+ // RFC 5280, 4.2.2.1 (Authority Information Access)
+ OCSPServer []string
+ IssuingCertificateURL []string
+
+ // Subject Information Access
+ SubjectTimestamps []string
+ SubjectCARepositories []string
+
+ // Subject Alternate Name values. (Note that these values may not be valid
+ // if invalid values were contained within a parsed certificate. For
+ // example, an element of DNSNames may not be a valid DNS domain name.)
+ DNSNames []string
+ EmailAddresses []string
+ IPAddresses []net.IP
+ URIs []*url.URL
+
+ // Name constraints
+ PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical.
+ PermittedDNSDomains []string
+ ExcludedDNSDomains []string
+ PermittedIPRanges []*net.IPNet
+ ExcludedIPRanges []*net.IPNet
+ PermittedEmailAddresses []string
+ ExcludedEmailAddresses []string
+ PermittedURIDomains []string
+ ExcludedURIDomains []string
+
+ // CRL Distribution Points
+ CRLDistributionPoints []string
+
+ PolicyIdentifiers []asn1.ObjectIdentifier
+
+ RPKIAddressRanges []*IPAddressFamilyBlocks
+ RPKIASNumbers, RPKIRoutingDomainIDs *ASIdentifiers
+
+ // Certificate Transparency SCT extension contents; this is a TLS-encoded
+ // SignedCertificateTimestampList (RFC 6962 s3.3).
+ RawSCT []byte
+ SCTList SignedCertificateTimestampList
+}
+
+// ErrUnsupportedAlgorithm results from attempting to perform an operation that
+// involves algorithms that are not currently implemented.
+var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented")
+
+// InsecureAlgorithmError results when the signature algorithm for a certificate
+// is known to be insecure.
+type InsecureAlgorithmError SignatureAlgorithm
+
+func (e InsecureAlgorithmError) Error() string {
+ return fmt.Sprintf("x509: cannot verify signature: insecure algorithm %v", SignatureAlgorithm(e))
+}
+
+// ConstraintViolationError results when a requested usage is not permitted by
+// a certificate. For example: checking a signature when the public key isn't a
+// certificate signing key.
+type ConstraintViolationError struct{}
+
+func (ConstraintViolationError) Error() string {
+ return "x509: invalid signature: parent certificate cannot sign this kind of certificate"
+}
+
+// Equal indicates whether two Certificate objects are equal (by comparing their
+// DER-encoded values).
+func (c *Certificate) Equal(other *Certificate) bool {
+ if c == nil || other == nil {
+ return c == other
+ }
+ return bytes.Equal(c.Raw, other.Raw)
+}
+
+// IsPrecertificate checks whether the certificate is a precertificate, by
+// checking for the presence of the CT Poison extension.
+func (c *Certificate) IsPrecertificate() bool {
+ if c == nil {
+ return false
+ }
+ for _, ext := range c.Extensions {
+ if ext.Id.Equal(OIDExtensionCTPoison) {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *Certificate) hasSANExtension() bool {
+ return oidInExtensions(OIDExtensionSubjectAltName, c.Extensions)
+}
+
+// Entrust have a broken root certificate (CN=Entrust.net Certification
+// Authority (2048)) which isn't marked as a CA certificate and is thus invalid
+// according to PKIX.
+// We recognise this certificate by its SubjectPublicKeyInfo and exempt it
+// from the Basic Constraints requirement.
+// See http://www.entrust.net/knowledge-base/technote.cfm?tn=7869
+//
+// TODO(agl): remove this hack once their reissued root is sufficiently
+// widespread.
+var entrustBrokenSPKI = []byte{
+ 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09,
+ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
+ 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00,
+ 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01,
+ 0x00, 0x97, 0xa3, 0x2d, 0x3c, 0x9e, 0xde, 0x05,
+ 0xda, 0x13, 0xc2, 0x11, 0x8d, 0x9d, 0x8e, 0xe3,
+ 0x7f, 0xc7, 0x4b, 0x7e, 0x5a, 0x9f, 0xb3, 0xff,
+ 0x62, 0xab, 0x73, 0xc8, 0x28, 0x6b, 0xba, 0x10,
+ 0x64, 0x82, 0x87, 0x13, 0xcd, 0x57, 0x18, 0xff,
+ 0x28, 0xce, 0xc0, 0xe6, 0x0e, 0x06, 0x91, 0x50,
+ 0x29, 0x83, 0xd1, 0xf2, 0xc3, 0x2a, 0xdb, 0xd8,
+ 0xdb, 0x4e, 0x04, 0xcc, 0x00, 0xeb, 0x8b, 0xb6,
+ 0x96, 0xdc, 0xbc, 0xaa, 0xfa, 0x52, 0x77, 0x04,
+ 0xc1, 0xdb, 0x19, 0xe4, 0xae, 0x9c, 0xfd, 0x3c,
+ 0x8b, 0x03, 0xef, 0x4d, 0xbc, 0x1a, 0x03, 0x65,
+ 0xf9, 0xc1, 0xb1, 0x3f, 0x72, 0x86, 0xf2, 0x38,
+ 0xaa, 0x19, 0xae, 0x10, 0x88, 0x78, 0x28, 0xda,
+ 0x75, 0xc3, 0x3d, 0x02, 0x82, 0x02, 0x9c, 0xb9,
+ 0xc1, 0x65, 0x77, 0x76, 0x24, 0x4c, 0x98, 0xf7,
+ 0x6d, 0x31, 0x38, 0xfb, 0xdb, 0xfe, 0xdb, 0x37,
+ 0x02, 0x76, 0xa1, 0x18, 0x97, 0xa6, 0xcc, 0xde,
+ 0x20, 0x09, 0x49, 0x36, 0x24, 0x69, 0x42, 0xf6,
+ 0xe4, 0x37, 0x62, 0xf1, 0x59, 0x6d, 0xa9, 0x3c,
+ 0xed, 0x34, 0x9c, 0xa3, 0x8e, 0xdb, 0xdc, 0x3a,
+ 0xd7, 0xf7, 0x0a, 0x6f, 0xef, 0x2e, 0xd8, 0xd5,
+ 0x93, 0x5a, 0x7a, 0xed, 0x08, 0x49, 0x68, 0xe2,
+ 0x41, 0xe3, 0x5a, 0x90, 0xc1, 0x86, 0x55, 0xfc,
+ 0x51, 0x43, 0x9d, 0xe0, 0xb2, 0xc4, 0x67, 0xb4,
+ 0xcb, 0x32, 0x31, 0x25, 0xf0, 0x54, 0x9f, 0x4b,
+ 0xd1, 0x6f, 0xdb, 0xd4, 0xdd, 0xfc, 0xaf, 0x5e,
+ 0x6c, 0x78, 0x90, 0x95, 0xde, 0xca, 0x3a, 0x48,
+ 0xb9, 0x79, 0x3c, 0x9b, 0x19, 0xd6, 0x75, 0x05,
+ 0xa0, 0xf9, 0x88, 0xd7, 0xc1, 0xe8, 0xa5, 0x09,
+ 0xe4, 0x1a, 0x15, 0xdc, 0x87, 0x23, 0xaa, 0xb2,
+ 0x75, 0x8c, 0x63, 0x25, 0x87, 0xd8, 0xf8, 0x3d,
+ 0xa6, 0xc2, 0xcc, 0x66, 0xff, 0xa5, 0x66, 0x68,
+ 0x55, 0x02, 0x03, 0x01, 0x00, 0x01,
+}
+
+// CheckSignatureFrom verifies that the signature on c is a valid signature
+// from parent.
+func (c *Certificate) CheckSignatureFrom(parent *Certificate) error {
+ // RFC 5280, 4.2.1.9:
+ // "If the basic constraints extension is not present in a version 3
+ // certificate, or the extension is present but the cA boolean is not
+ // asserted, then the certified public key MUST NOT be used to verify
+ // certificate signatures."
+ // (except for Entrust, see comment above entrustBrokenSPKI)
+ if (parent.Version == 3 && !parent.BasicConstraintsValid ||
+ parent.BasicConstraintsValid && !parent.IsCA) &&
+ !bytes.Equal(c.RawSubjectPublicKeyInfo, entrustBrokenSPKI) {
+ return ConstraintViolationError{}
+ }
+
+ if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 {
+ return ConstraintViolationError{}
+ }
+
+ if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm {
+ return ErrUnsupportedAlgorithm
+ }
+
+ // TODO(agl): don't ignore the path length constraint.
+
+ return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature)
+}
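+
+// exampleCheckChainSignatures is a hypothetical sketch (not part of the
+// upstream file): with the leaf at index 0, each certificate in a chain
+// must be signed by its immediate successor.
+func exampleCheckChainSignatures(chain []*Certificate) error {
+ for i := 0; i+1 < len(chain); i++ {
+ if err := chain[i].CheckSignatureFrom(chain[i+1]); err != nil {
+ return err
+ }
+ }
+ return nil
+}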
+
+// CheckSignature verifies that signature is a valid signature over signed from
+// c's public key.
+func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) error {
+ return checkSignature(algo, signed, signature, c.PublicKey)
+}
+
+func (c *Certificate) hasNameConstraints() bool {
+ return oidInExtensions(OIDExtensionNameConstraints, c.Extensions)
+}
+
+func (c *Certificate) getSANExtension() []byte {
+ for _, e := range c.Extensions {
+ if e.Id.Equal(OIDExtensionSubjectAltName) {
+ return e.Value
+ }
+ }
+
+ return nil
+}
+
+func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey interface{}) error {
+ return fmt.Errorf("x509: signature algorithm specifies an %s public key, but have public key of type %T", expectedPubKeyAlgo.String(), pubKey)
+}
+
+// CheckSignature verifies that signature is a valid signature over signed from
+// a crypto.PublicKey.
+func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey crypto.PublicKey) (err error) {
+ var hashType crypto.Hash
+ var pubKeyAlgo PublicKeyAlgorithm
+
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == algo {
+ hashType = details.hash
+ pubKeyAlgo = details.pubKeyAlgo
+ }
+ }
+
+ switch hashType {
+ case crypto.Hash(0):
+ if pubKeyAlgo != Ed25519 {
+ return ErrUnsupportedAlgorithm
+ }
+ case crypto.MD5:
+ return InsecureAlgorithmError(algo)
+ default:
+ if !hashType.Available() {
+ return ErrUnsupportedAlgorithm
+ }
+ h := hashType.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
+
+ switch pub := publicKey.(type) {
+ case *rsa.PublicKey:
+ if pubKeyAlgo != RSA {
+ return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
+ }
+ if algo.isRSAPSS() {
+ return rsa.VerifyPSS(pub, hashType, signed, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash})
+ } else {
+ return rsa.VerifyPKCS1v15(pub, hashType, signed, signature)
+ }
+ case *dsa.PublicKey:
+ if pubKeyAlgo != DSA {
+ return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
+ }
+ dsaSig := new(dsaSignature)
+ if rest, err := asn1.Unmarshal(signature, dsaSig); err != nil {
+ return err
+ } else if len(rest) != 0 {
+ return errors.New("x509: trailing data after DSA signature")
+ }
+ if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
+ return errors.New("x509: DSA signature contained zero or negative values")
+ }
+ // According to FIPS 186-3, section 4.6, the hash must be truncated if it is longer
+ // than the key length, but crypto/dsa doesn't do it automatically.
+ if maxHashLen := pub.Q.BitLen() / 8; maxHashLen < len(signed) {
+ signed = signed[:maxHashLen]
+ }
+ if !dsa.Verify(pub, signed, dsaSig.R, dsaSig.S) {
+ return errors.New("x509: DSA verification failure")
+ }
+ return
+ case *ecdsa.PublicKey:
+ if pubKeyAlgo != ECDSA {
+ return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
+ }
+ ecdsaSig := new(ecdsaSignature)
+ if rest, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
+ return err
+ } else if len(rest) != 0 {
+ return errors.New("x509: trailing data after ECDSA signature")
+ }
+ if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+ return errors.New("x509: ECDSA signature contained zero or negative values")
+ }
+ if !ecdsa.Verify(pub, signed, ecdsaSig.R, ecdsaSig.S) {
+ return errors.New("x509: ECDSA verification failure")
+ }
+ return
+ case ed25519.PublicKey:
+ if pubKeyAlgo != Ed25519 {
+ return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
+ }
+ if !ed25519.Verify(pub, signed, signature) {
+ return errors.New("x509: Ed25519 verification failure")
+ }
+ return
+ }
+ return ErrUnsupportedAlgorithm
+}
+
+// CheckCRLSignature checks that the signature in crl is from c.
+func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) error {
+ algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm)
+ return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
+}
+
+// UnhandledCriticalExtension results when the certificate contains an extension
+// that is marked as critical but which is not handled by this library.
+type UnhandledCriticalExtension struct {
+ ID asn1.ObjectIdentifier
+}
+
+func (h UnhandledCriticalExtension) Error() string {
+ return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID)
+}
+
+// removeExtension takes a DER-encoded TBSCertificate, removes the extension
+// specified by oid (preserving the order of other extensions), and returns the
+// result still as a DER-encoded TBSCertificate. This function will fail if
+// there is not exactly 1 extension of the type specified by the oid present.
+func removeExtension(tbsData []byte, oid asn1.ObjectIdentifier) ([]byte, error) {
+ var tbs tbsCertificate
+ rest, err := asn1.Unmarshal(tbsData, &tbs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err)
+ } else if rLen := len(rest); rLen > 0 {
+ return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen)
+ }
+ extAt := -1
+ for i, ext := range tbs.Extensions {
+ if ext.Id.Equal(oid) {
+ if extAt != -1 {
+ return nil, errors.New("multiple extensions of specified type present")
+ }
+ extAt = i
+ }
+ }
+ if extAt == -1 {
+ return nil, errors.New("no extension of specified type present")
+ }
+ tbs.Extensions = append(tbs.Extensions[:extAt], tbs.Extensions[extAt+1:]...)
+ // Clear out the asn1.RawContent so the re-marshal operation sees the
+ // updated structure (rather than just copying the out-of-date DER data).
+ tbs.Raw = nil
+
+ data, err := asn1.Marshal(tbs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err)
+ }
+ return data, nil
+}
+
+// RemoveSCTList takes a DER-encoded TBSCertificate and removes the CT SCT
+// extension that contains the SCT list (preserving the order of other
+// extensions), and returns the result still as a DER-encoded TBSCertificate.
+// This function will fail if there is not exactly 1 CT SCT extension present.
+func RemoveSCTList(tbsData []byte) ([]byte, error) {
+ return removeExtension(tbsData, OIDExtensionCTSCT)
+}
+
+// RemoveCTPoison takes a DER-encoded TBSCertificate and removes the CT poison
+// extension (preserving the order of other extensions), and returns the result
+// still as a DER-encoded TBSCertificate. This function will fail if there is
+// not exactly 1 CT poison extension present.
+func RemoveCTPoison(tbsData []byte) ([]byte, error) {
+ return BuildPrecertTBS(tbsData, nil)
+}
+
+// BuildPrecertTBS builds a Certificate Transparency pre-certificate (RFC 6962
+// s3.1) from the given DER-encoded TBSCertificate, returning a DER-encoded
+// TBSCertificate.
+//
+// This function removes the CT poison extension (there must be exactly 1 of
+// these), preserving the order of other extensions.
+//
+// If preIssuer is provided, this should be a special intermediate certificate
+// that was used to sign the precert (indicated by having the special
+// CertificateTransparency extended key usage). In this case, the issuance
+// information of the pre-cert is updated to reflect the next issuer in the
+// chain, i.e. the issuer of this special intermediate:
+// - The precert's Issuer is changed to the Issuer of the intermediate
+// - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the
+// intermediate.
+func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) {
+ data, err := removeExtension(tbsData, OIDExtensionCTPoison)
+ if err != nil {
+ return nil, err
+ }
+
+ var tbs tbsCertificate
+ rest, err := asn1.Unmarshal(data, &tbs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err)
+ } else if rLen := len(rest); rLen > 0 {
+ return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen)
+ }
+
+ if preIssuer != nil {
+ // Update the precert's Issuer field. Use the RawIssuer rather than the
+ // parsed Issuer to avoid any chance of ASN.1 differences (e.g. switching
+ // from UTF8String to PrintableString).
+ tbs.Issuer.FullBytes = preIssuer.RawIssuer
+
+ // Also need to update the cert's AuthorityKeyID extension
+ // to that of the preIssuer.
+ var issuerKeyID []byte
+ for _, ext := range preIssuer.Extensions {
+ if ext.Id.Equal(OIDExtensionAuthorityKeyId) {
+ issuerKeyID = ext.Value
+ break
+ }
+ }
+
+ // Check the preIssuer has the CT EKU.
+ seenCTEKU := false
+ for _, eku := range preIssuer.ExtKeyUsage {
+ if eku == ExtKeyUsageCertificateTransparency {
+ seenCTEKU = true
+ break
+ }
+ }
+ if !seenCTEKU {
+ return nil, fmt.Errorf("issuer does not have CertificateTransparency extended key usage")
+ }
+
+ keyAt := -1
+ for i, ext := range tbs.Extensions {
+ if ext.Id.Equal(OIDExtensionAuthorityKeyId) {
+ keyAt = i
+ break
+ }
+ }
+ if keyAt >= 0 {
+ // PreCert has an auth-key-id; replace it with the value from the preIssuer
+ if issuerKeyID != nil {
+ tbs.Extensions[keyAt].Value = issuerKeyID
+ } else {
+ tbs.Extensions = append(tbs.Extensions[:keyAt], tbs.Extensions[keyAt+1:]...)
+ }
+ } else if issuerKeyID != nil {
+ // PreCert did not have an auth-key-id, but the preIssuer does, so add it at the end.
+ authKeyIDExt := pkix.Extension{
+ Id: OIDExtensionAuthorityKeyId,
+ Critical: false,
+ Value: issuerKeyID,
+ }
+ tbs.Extensions = append(tbs.Extensions, authKeyIDExt)
+ }
+
+ // Clear out the asn1.RawContent so the re-marshal operation sees the
+ // updated structure (rather than just copying the out-of-date DER data).
+ tbs.Raw = nil
+ }
+
+ data, err = asn1.Marshal(tbs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err)
+ }
+ return data, nil
+}
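+
+// exampleRebuildPrecertTBS is a hypothetical sketch (not part of the
+// upstream file): when a dedicated CT intermediate signed the precert,
+// BuildPrecertTBS also rewrites the issuance information; otherwise
+// stripping the poison extension via RemoveCTPoison is sufficient.
+func exampleRebuildPrecertTBS(precert, preIssuer *Certificate) ([]byte, error) {
+ if preIssuer != nil {
+ return BuildPrecertTBS(precert.RawTBSCertificate, preIssuer)
+ }
+ return RemoveCTPoison(precert.RawTBSCertificate)
+}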
+
+type basicConstraints struct {
+ IsCA bool `asn1:"optional"`
+ MaxPathLen int `asn1:"optional,default:-1"`
+}
+
+// RFC 5280, 4.2.1.4
+type policyInformation struct {
+ Policy asn1.ObjectIdentifier
+ // policyQualifiers omitted
+}
+
+const (
+ nameTypeEmail = 1
+ nameTypeDNS = 2
+ nameTypeURI = 6
+ nameTypeIP = 7
+)
+
+// RFC 5280, 4.2.2.1
+type accessDescription struct {
+ Method asn1.ObjectIdentifier
+ Location asn1.RawValue
+}
+
+// RFC 5280, 4.2.1.14
+type distributionPoint struct {
+ DistributionPoint distributionPointName `asn1:"optional,tag:0"`
+ Reason asn1.BitString `asn1:"optional,tag:1"`
+ CRLIssuer asn1.RawValue `asn1:"optional,tag:2"`
+}
+
+type distributionPointName struct {
+ FullName []asn1.RawValue `asn1:"optional,tag:0"`
+ RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
+}
+
+func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo, nfe *NonFatalErrors) (interface{}, error) {
+ asn1Data := keyData.PublicKey.RightAlign()
+ switch algo {
+ case RSA, RSAESOAEP:
+ // RSA public keys must have a NULL in the parameters.
+ // See RFC 3279, Section 2.3.1.
+ if algo == RSA && !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
+ nfe.AddError(errors.New("x509: RSA key missing NULL parameters"))
+ }
+ if algo == RSAESOAEP {
+ // We only parse the parameters to ensure the encoding is valid; the actual values are discarded.
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ params := new(rsaesoaepAlgorithmParameters)
+ params.HashFunc = sha1Identifier
+ params.MaskgenFunc = mgf1SHA1Identifier
+ params.PSourceFunc = pSpecifiedEmptyIdentifier
+ rest, err := asn1.Unmarshal(paramsData, params)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after RSAES-OAEP parameters")
+ }
+ }
+
+ p := new(pkcs1PublicKey)
+ rest, err := asn1.Unmarshal(asn1Data, p)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, p, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after RSA public key")
+ }
+
+ if p.N.Sign() <= 0 {
+ nfe.AddError(errors.New("x509: RSA modulus is not a positive number"))
+ }
+ if p.E <= 0 {
+ return nil, errors.New("x509: RSA public exponent is not a positive number")
+ }
+
+ // TODO(dkarch): Update to return the parameters once crypto/x509 has come up with permanent solution (https://github.com/golang/go/issues/30416)
+ pub := &rsa.PublicKey{
+ E: p.E,
+ N: p.N,
+ }
+ return pub, nil
+ case DSA:
+ var p *big.Int
+ rest, err := asn1.Unmarshal(asn1Data, &p)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &p, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after DSA public key")
+ }
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ params := new(dsaAlgorithmParameters)
+ rest, err = asn1.Unmarshal(paramsData, params)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after DSA parameters")
+ }
+ if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 {
+ return nil, errors.New("x509: zero or negative DSA parameter")
+ }
+ pub := &dsa.PublicKey{
+ Parameters: dsa.Parameters{
+ P: params.P,
+ Q: params.Q,
+ G: params.G,
+ },
+ Y: p,
+ }
+ return pub, nil
+ case ECDSA:
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ namedCurveOID := new(asn1.ObjectIdentifier)
+ rest, err := asn1.Unmarshal(paramsData, namedCurveOID)
+ if err != nil {
+ return nil, errors.New("x509: failed to parse ECDSA parameters as named curve")
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after ECDSA parameters")
+ }
+ namedCurve := namedCurveFromOID(*namedCurveOID, nfe)
+ if namedCurve == nil {
+ return nil, fmt.Errorf("x509: unsupported elliptic curve %v", namedCurveOID)
+ }
+ x, y := elliptic.Unmarshal(namedCurve, asn1Data)
+ if x == nil {
+ return nil, errors.New("x509: failed to unmarshal elliptic curve point")
+ }
+ pub := &ecdsa.PublicKey{
+ Curve: namedCurve,
+ X: x,
+ Y: y,
+ }
+ return pub, nil
+ case Ed25519:
+ return ed25519.PublicKey(asn1Data), nil
+ default:
+ return nil, nil
+ }
+}
+
+// NonFatalErrors is an error type which can hold a number of other errors.
+// It's used to collect a range of non-fatal errors which occur while parsing
+// a certificate, that way we can still match on certs which technically are
+// invalid.
+type NonFatalErrors struct {
+ Errors []error
+}
+
+// AddError adds an error to the list of errors contained by NonFatalErrors.
+func (e *NonFatalErrors) AddError(err error) {
+ e.Errors = append(e.Errors, err)
+}
+
+// Error returns a string consisting of the values of Error() from all of the
+// errors contained in |e|.
+func (e NonFatalErrors) Error() string {
+ r := "NonFatalErrors: "
+ for _, err := range e.Errors {
+ r += err.Error() + "; "
+ }
+ return r
+}
+
+// HasError returns true if |e| contains at least one error
+func (e *NonFatalErrors) HasError() bool {
+ if e == nil {
+ return false
+ }
+ return len(e.Errors) > 0
+}
+
+// Append combines the contents of two NonFatalErrors instances.
+func (e *NonFatalErrors) Append(more *NonFatalErrors) *NonFatalErrors {
+ if e == nil {
+ return more
+ }
+ if more == nil {
+ return e
+ }
+ combined := NonFatalErrors{Errors: make([]error, 0, len(e.Errors)+len(more.Errors))}
+ combined.Errors = append(combined.Errors, e.Errors...)
+ combined.Errors = append(combined.Errors, more.Errors...)
+ return &combined
+}
+
+// IsFatal indicates whether an error is fatal.
+func IsFatal(err error) bool {
+ if err == nil {
+ return false
+ }
+ if _, ok := err.(NonFatalErrors); ok {
+ return false
+ }
+ if errs, ok := err.(*Errors); ok {
+ return errs.Fatal()
+ }
+ return true
+}
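+
+// exampleLaxParse is a hypothetical sketch (not part of the upstream file)
+// of the pattern NonFatalErrors enables: keep a certificate whose parse
+// produced only recoverable problems, and reject it only on fatal errors.
+func exampleLaxParse(der []byte) (*Certificate, error) {
+ cert, err := ParseCertificate(der)
+ if err != nil && IsFatal(err) {
+ return nil, err
+ }
+ return cert, nil
+}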
+
+func parseDistributionPoints(data []byte, crldp *[]string) error {
+ // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
+ //
+ // DistributionPoint ::= SEQUENCE {
+ // distributionPoint [0] DistributionPointName OPTIONAL,
+ // reasons [1] ReasonFlags OPTIONAL,
+ // cRLIssuer [2] GeneralNames OPTIONAL }
+ //
+ // DistributionPointName ::= CHOICE {
+ // fullName [0] GeneralNames,
+ // nameRelativeToCRLIssuer [1] RelativeDistinguishedName }
+
+ var cdp []distributionPoint
+ if rest, err := asn1.Unmarshal(data, &cdp); err != nil {
+ return err
+ } else if len(rest) != 0 {
+ return errors.New("x509: trailing data after X.509 CRL distribution point")
+ }
+
+ for _, dp := range cdp {
+ // Per RFC 5280, 4.2.1.13, one of distributionPoint or cRLIssuer may be empty.
+ if len(dp.DistributionPoint.FullName) == 0 {
+ continue
+ }
+
+ for _, fullName := range dp.DistributionPoint.FullName {
+ if fullName.Tag == 6 {
+ *crldp = append(*crldp, string(fullName.Bytes))
+ }
+ }
+ }
+ return nil
+}
+
+func forEachSAN(extension []byte, callback func(tag int, data []byte) error) error {
+ // RFC 5280, 4.2.1.6
+
+ // SubjectAltName ::= GeneralNames
+ //
+ // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
+ //
+ // GeneralName ::= CHOICE {
+ // otherName [0] OtherName,
+ // rfc822Name [1] IA5String,
+ // dNSName [2] IA5String,
+ // x400Address [3] ORAddress,
+ // directoryName [4] Name,
+ // ediPartyName [5] EDIPartyName,
+ // uniformResourceIdentifier [6] IA5String,
+ // iPAddress [7] OCTET STRING,
+ // registeredID [8] OBJECT IDENTIFIER }
+ var seq asn1.RawValue
+ rest, err := asn1.Unmarshal(extension, &seq)
+ if err != nil {
+ return err
+ } else if len(rest) != 0 {
+ return errors.New("x509: trailing data after X.509 extension")
+ }
+ if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal {
+ return asn1.StructuralError{Msg: "bad SAN sequence"}
+ }
+
+ rest = seq.Bytes
+ for len(rest) > 0 {
+ var v asn1.RawValue
+ rest, err = asn1.Unmarshal(rest, &v)
+ if err != nil {
+ return err
+ }
+
+ if err := callback(v.Tag, v.Bytes); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) {
+ err = forEachSAN(value, func(tag int, data []byte) error {
+ switch tag {
+ case nameTypeEmail:
+ emailAddresses = append(emailAddresses, string(data))
+ case nameTypeDNS:
+ dnsNames = append(dnsNames, string(data))
+ case nameTypeURI:
+ uri, err := url.Parse(string(data))
+ if err != nil {
+ return fmt.Errorf("x509: cannot parse URI %q: %s", string(data), err)
+ }
+ if len(uri.Host) > 0 {
+ if _, ok := domainToReverseLabels(uri.Host); !ok {
+ return fmt.Errorf("x509: cannot parse URI %q: invalid domain", string(data))
+ }
+ }
+ uris = append(uris, uri)
+ case nameTypeIP:
+ switch len(data) {
+ case net.IPv4len, net.IPv6len:
+ ipAddresses = append(ipAddresses, data)
+ default:
+ nfe.AddError(errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data))))
+ }
+ }
+
+ return nil
+ })
+
+ return
+}
+
+// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits.
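+// For example, []byte{0xff, 0x80} (a /9 mask) is valid, while
+// []byte{0xff, 0x08} is not.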
+func isValidIPMask(mask []byte) bool {
+ seenZero := false
+
+ for _, b := range mask {
+ if seenZero {
+ if b != 0 {
+ return false
+ }
+
+ continue
+ }
+
+ switch b {
+ case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe:
+ seenZero = true
+ case 0xff:
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
+func parseNameConstraintsExtension(out *Certificate, e pkix.Extension, nfe *NonFatalErrors) (unhandled bool, err error) {
+ // RFC 5280, 4.2.1.10
+
+ // NameConstraints ::= SEQUENCE {
+ // permittedSubtrees [0] GeneralSubtrees OPTIONAL,
+ // excludedSubtrees [1] GeneralSubtrees OPTIONAL }
+ //
+ // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree
+ //
+ // GeneralSubtree ::= SEQUENCE {
+ // base GeneralName,
+ // minimum [0] BaseDistance DEFAULT 0,
+ // maximum [1] BaseDistance OPTIONAL }
+ //
+ // BaseDistance ::= INTEGER (0..MAX)
+
+ outer := cryptobyte.String(e.Value)
+ var toplevel, permitted, excluded cryptobyte.String
+ var havePermitted, haveExcluded bool
+ if !outer.ReadASN1(&toplevel, cryptobyte_asn1.SEQUENCE) ||
+ !outer.Empty() ||
+ !toplevel.ReadOptionalASN1(&permitted, &havePermitted, cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()) ||
+ !toplevel.ReadOptionalASN1(&excluded, &haveExcluded, cryptobyte_asn1.Tag(1).ContextSpecific().Constructed()) ||
+ !toplevel.Empty() {
+ return false, errors.New("x509: invalid NameConstraints extension")
+ }
+
+ if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 {
+ // From RFC 5280, Section 4.2.1.10:
+ // “either the permittedSubtrees field
+ // or the excludedSubtrees MUST be
+ // present”
+ return false, errors.New("x509: empty name constraints extension")
+ }
+
+ getValues := func(subtrees cryptobyte.String) (dnsNames []string, ips []*net.IPNet, emails, uriDomains []string, err error) {
+ for !subtrees.Empty() {
+ var seq, value cryptobyte.String
+ var tag cryptobyte_asn1.Tag
+ if !subtrees.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) ||
+ !seq.ReadAnyASN1(&value, &tag) {
+ return nil, nil, nil, nil, fmt.Errorf("x509: invalid NameConstraints extension")
+ }
+
+ var (
+ dnsTag = cryptobyte_asn1.Tag(2).ContextSpecific()
+ emailTag = cryptobyte_asn1.Tag(1).ContextSpecific()
+ ipTag = cryptobyte_asn1.Tag(7).ContextSpecific()
+ uriTag = cryptobyte_asn1.Tag(6).ContextSpecific()
+ )
+
+ switch tag {
+ case dnsTag:
+ domain := string(value)
+ if err := isIA5String(domain); err != nil {
+ return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
+ }
+
+ trimmedDomain := domain
+ if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' {
+ // constraints can have a leading
+ // period to exclude the domain
+ // itself, but that's not valid in a
+ // normal domain name.
+ trimmedDomain = trimmedDomain[1:]
+ }
+ if _, ok := domainToReverseLabels(trimmedDomain); !ok {
+ nfe.AddError(fmt.Errorf("x509: failed to parse dnsName constraint %q", domain))
+ }
+ dnsNames = append(dnsNames, domain)
+
+ case ipTag:
+ l := len(value)
+ var ip, mask []byte
+
+ switch l {
+ case 8:
+ ip = value[:4]
+ mask = value[4:]
+
+ case 32:
+ ip = value[:16]
+ mask = value[16:]
+
+ default:
+ return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained value of length %d", l)
+ }
+
+ if !isValidIPMask(mask) {
+ return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained invalid mask %x", mask)
+ }
+
+ ips = append(ips, &net.IPNet{IP: net.IP(ip), Mask: net.IPMask(mask)})
+
+ case emailTag:
+ constraint := string(value)
+ if err := isIA5String(constraint); err != nil {
+ return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
+ }
+
+ // If the constraint contains an @ then
+ // it specifies an exact mailbox name.
+ if strings.Contains(constraint, "@") {
+ if _, ok := parseRFC2821Mailbox(constraint); !ok {
+ nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint))
+ }
+ } else {
+ // Otherwise it's a domain name.
+ domain := constraint
+ if len(domain) > 0 && domain[0] == '.' {
+ domain = domain[1:]
+ }
+ if _, ok := domainToReverseLabels(domain); !ok {
+ nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint))
+ }
+ }
+ emails = append(emails, constraint)
+
+ case uriTag:
+ domain := string(value)
+ if err := isIA5String(domain); err != nil {
+ return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
+ }
+
+ if net.ParseIP(domain) != nil {
+ return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain)
+ }
+
+ trimmedDomain := domain
+ if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' {
+ // constraints can have a leading
+ // period to exclude the domain itself,
+ // but that's not valid in a normal
+ // domain name.
+ trimmedDomain = trimmedDomain[1:]
+ }
+ if _, ok := domainToReverseLabels(trimmedDomain); !ok {
+ nfe.AddError(fmt.Errorf("x509: failed to parse URI constraint %q", domain))
+ }
+ uriDomains = append(uriDomains, domain)
+
+ default:
+ unhandled = true
+ }
+ }
+
+ return dnsNames, ips, emails, uriDomains, nil
+ }
+
+ if out.PermittedDNSDomains, out.PermittedIPRanges, out.PermittedEmailAddresses, out.PermittedURIDomains, err = getValues(permitted); err != nil {
+ return false, err
+ }
+ if out.ExcludedDNSDomains, out.ExcludedIPRanges, out.ExcludedEmailAddresses, out.ExcludedURIDomains, err = getValues(excluded); err != nil {
+ return false, err
+ }
+ out.PermittedDNSDomainsCritical = e.Critical
+
+ return unhandled, nil
+}
+
+func parseCertificate(in *certificate, tbsOnly bool) (*Certificate, error) {
+ var nfe NonFatalErrors
+
+ // Certificates contain two signature algorithm identifier fields,
+ // one in the inner signed tbsCertificate structure and one in the
+ // outer unsigned certificate structure. RFC 5280 requires these
+ // fields match, but golang doesn't impose this restriction. Because
+ // the outer structure is not covered by the signature the algorithm
+ // field is entirely malleable. This allows a user to bypass the
+ // leaf data uniqueness check that happens in trillian by altering
+ // the unbounded OID or parameter fields of the algorithmIdentifier
+ // structure and submit an infinite number of duplicate but slightly
+ // different looking certificates to a log. To avoid this directly
+ // compare the bytes of the two algorithmIdentifier structures
+ // and reject the certificate if they do not match.
+ if !tbsOnly && !bytes.Equal(in.SignatureAlgorithm.Raw, in.TBSCertificate.SignatureAlgorithm.Raw) {
+ return nil, errors.New("x509: mismatching signature algorithm identifiers")
+ }
+
+ out := new(Certificate)
+ out.Raw = in.Raw
+ out.RawTBSCertificate = in.TBSCertificate.Raw
+ out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw
+ out.RawSubject = in.TBSCertificate.Subject.FullBytes
+ out.RawIssuer = in.TBSCertificate.Issuer.FullBytes
+
+ out.Signature = in.SignatureValue.RightAlign()
+ out.SignatureAlgorithm = SignatureAlgorithmFromAI(in.TBSCertificate.SignatureAlgorithm)
+
+ out.PublicKeyAlgorithm =
+ getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
+ var err error
+ out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey, &nfe)
+ if err != nil {
+ return nil, err
+ }
+
+ out.Version = in.TBSCertificate.Version + 1
+ out.SerialNumber = in.TBSCertificate.SerialNumber
+
+ var issuer, subject pkix.RDNSequence
+ if rest, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Subject.FullBytes, &subject, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 subject")
+ }
+ if rest, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Issuer.FullBytes, &issuer, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 subject")
+ }
+
+ out.Issuer.FillFromRDNSequence(&issuer)
+ out.Subject.FillFromRDNSequence(&subject)
+
+ out.NotBefore = in.TBSCertificate.Validity.NotBefore
+ out.NotAfter = in.TBSCertificate.Validity.NotAfter
+
+ for _, e := range in.TBSCertificate.Extensions {
+ out.Extensions = append(out.Extensions, e)
+ unhandled := false
+
+ if len(e.Id) == 4 && e.Id[0] == OIDExtensionArc[0] && e.Id[1] == OIDExtensionArc[1] && e.Id[2] == OIDExtensionArc[2] {
+ switch e.Id[3] {
+ case OIDExtensionKeyUsage[3]:
+ // RFC 5280, 4.2.1.3
+ var usageBits asn1.BitString
+ if rest, err := asn1.Unmarshal(e.Value, &usageBits); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 KeyUsage")
+ }
+
+ var usage int
+ for i := 0; i < 9; i++ {
+ if usageBits.At(i) != 0 {
+ usage |= 1 << uint(i)
+ }
+ }
+ out.KeyUsage = KeyUsage(usage)
+
+ case OIDExtensionBasicConstraints[3]:
+ // RFC 5280, 4.2.1.9
+ var constraints basicConstraints
+ if rest, err := asn1.Unmarshal(e.Value, &constraints); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 BasicConstraints")
+ }
+
+ out.BasicConstraintsValid = true
+ out.IsCA = constraints.IsCA
+ out.MaxPathLen = constraints.MaxPathLen
+ out.MaxPathLenZero = out.MaxPathLen == 0
+ // TODO: map out.MaxPathLen to 0 if it has the -1 default value? (Issue 19285)
+
+ case OIDExtensionSubjectAltName[3]:
+ out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(e.Value, &nfe)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(out.DNSNames) == 0 && len(out.EmailAddresses) == 0 && len(out.IPAddresses) == 0 && len(out.URIs) == 0 {
+ // If we didn't parse anything then we do the critical check, below.
+ unhandled = true
+ }
+
+ case OIDExtensionNameConstraints[3]:
+ unhandled, err = parseNameConstraintsExtension(out, e, &nfe)
+ if err != nil {
+ return nil, err
+ }
+
+ case OIDExtensionCRLDistributionPoints[3]:
+ // RFC 5280, 4.2.1.13
+ if err := parseDistributionPoints(e.Value, &out.CRLDistributionPoints); err != nil {
+ return nil, err
+ }
+
+ case OIDExtensionAuthorityKeyId[3]:
+ // RFC 5280, 4.2.1.1
+ var a authKeyId
+ if rest, err := asn1.Unmarshal(e.Value, &a); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 authority key-id")
+ }
+ out.AuthorityKeyId = a.Id
+
+ case OIDExtensionExtendedKeyUsage[3]:
+ // RFC 5280, 4.2.1.12. Extended Key Usage
+
+ // id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 }
+ //
+ // ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
+ //
+ // KeyPurposeId ::= OBJECT IDENTIFIER
+
+ var keyUsage []asn1.ObjectIdentifier
+ if len(e.Value) == 0 {
+ nfe.AddError(errors.New("x509: empty ExtendedKeyUsage"))
+ } else {
+ rest, err := asn1.Unmarshal(e.Value, &keyUsage)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(e.Value, &keyUsage, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage")
+ }
+ }
+
+ for _, u := range keyUsage {
+ if extKeyUsage, ok := extKeyUsageFromOID(u); ok {
+ out.ExtKeyUsage = append(out.ExtKeyUsage, extKeyUsage)
+ } else {
+ out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u)
+ }
+ }
+
+ case OIDExtensionSubjectKeyId[3]:
+ // RFC 5280, 4.2.1.2
+ var keyid []byte
+ if rest, err := asn1.Unmarshal(e.Value, &keyid); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 key-id")
+ }
+ out.SubjectKeyId = keyid
+
+ case OIDExtensionCertificatePolicies[3]:
+ // RFC 5280 4.2.1.4: Certificate Policies
+ var policies []policyInformation
+ if rest, err := asn1.Unmarshal(e.Value, &policies); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 certificate policies")
+ }
+ out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, len(policies))
+ for i, policy := range policies {
+ out.PolicyIdentifiers[i] = policy.Policy
+ }
+
+ default:
+ // Unknown extensions are recorded if critical.
+ unhandled = true
+ }
+ } else if e.Id.Equal(OIDExtensionAuthorityInfoAccess) {
+ // RFC 5280 4.2.2.1: Authority Information Access
+ var aia []accessDescription
+ if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 authority information")
+ }
+ if len(aia) == 0 {
+ nfe.AddError(errors.New("x509: empty AuthorityInfoAccess extension"))
+ }
+
+ for _, v := range aia {
+ // GeneralName: uniformResourceIdentifier [6] IA5String
+ if v.Location.Tag != 6 {
+ continue
+ }
+ if v.Method.Equal(OIDAuthorityInfoAccessOCSP) {
+ out.OCSPServer = append(out.OCSPServer, string(v.Location.Bytes))
+ } else if v.Method.Equal(OIDAuthorityInfoAccessIssuers) {
+ out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes))
+ }
+ }
+ } else if e.Id.Equal(OIDExtensionSubjectInfoAccess) {
+ // RFC 5280 4.2.2.2: Subject Information Access
+ var sia []accessDescription
+ if rest, err := asn1.Unmarshal(e.Value, &sia); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 subject information")
+ }
+ if len(sia) == 0 {
+ nfe.AddError(errors.New("x509: empty SubjectInfoAccess extension"))
+ }
+
+ for _, v := range sia {
+ // TODO(drysdale): cope with non-URI types of GeneralName
+ // GeneralName: uniformResourceIdentifier [6] IA5String
+ if v.Location.Tag != 6 {
+ continue
+ }
+ if v.Method.Equal(OIDSubjectInfoAccessTimestamp) {
+ out.SubjectTimestamps = append(out.SubjectTimestamps, string(v.Location.Bytes))
+ } else if v.Method.Equal(OIDSubjectInfoAccessCARepo) {
+ out.SubjectCARepositories = append(out.SubjectCARepositories, string(v.Location.Bytes))
+ }
+ }
+ } else if e.Id.Equal(OIDExtensionIPPrefixList) {
+ out.RPKIAddressRanges = parseRPKIAddrBlocks(e.Value, &nfe)
+ } else if e.Id.Equal(OIDExtensionASList) {
+ out.RPKIASNumbers, out.RPKIRoutingDomainIDs = parseRPKIASIdentifiers(e.Value, &nfe)
+ } else if e.Id.Equal(OIDExtensionCTSCT) {
+ if rest, err := asn1.Unmarshal(e.Value, &out.RawSCT); err != nil {
+ nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal SCT list extension: %v", err))
+ } else if len(rest) != 0 {
+ nfe.AddError(errors.New("trailing data after ASN1-encoded SCT list"))
+ } else {
+ if rest, err := tls.Unmarshal(out.RawSCT, &out.SCTList); err != nil {
+ nfe.AddError(fmt.Errorf("failed to tls.Unmarshal SCT list: %v", err))
+ } else if len(rest) != 0 {
+ nfe.AddError(errors.New("trailing data after TLS-encoded SCT list"))
+ }
+ }
+ } else {
+ // Unknown extensions are recorded if critical.
+ unhandled = true
+ }
+
+ if e.Critical && unhandled {
+ out.UnhandledCriticalExtensions = append(out.UnhandledCriticalExtensions, e.Id)
+ }
+ }
+ if nfe.HasError() {
+ return out, nfe
+ }
+ return out, nil
+}
+
+// ParseTBSCertificate parses a single TBSCertificate from the given ASN.1 DER data.
+// The parsed data is returned in a Certificate struct for ease of access.
+func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) {
+ var tbsCert tbsCertificate
+ var nfe NonFatalErrors
+ rest, err := asn1.Unmarshal(asn1Data, &tbsCert)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &tbsCert, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) > 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+ ret, err := parseCertificate(&certificate{
+ Raw: tbsCert.Raw,
+ TBSCertificate: tbsCert}, true)
+ if err != nil {
+ errs, ok := err.(NonFatalErrors)
+ if !ok {
+ return nil, err
+ }
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
+ }
+ if nfe.HasError() {
+ return ret, nfe
+ }
+ return ret, nil
+}
+
+// ParseCertificate parses a single certificate from the given ASN.1 DER data.
+// This function can return both a Certificate and an error (in which case the
+// error will be of type NonFatalErrors).
+func ParseCertificate(asn1Data []byte) (*Certificate, error) {
+ var cert certificate
+ var nfe NonFatalErrors
+ rest, err := asn1.Unmarshal(asn1Data, &cert)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &cert, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) > 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+ ret, err := parseCertificate(&cert, false)
+ if err != nil {
+ errs, ok := err.(NonFatalErrors)
+ if !ok {
+ return nil, err
+ }
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
+ }
+ if nfe.HasError() {
+ return ret, nfe
+ }
+ return ret, nil
+}
+
+// ParseCertificates parses one or more certificates from the given ASN.1 DER
+// data. The certificates must be concatenated with no intermediate padding.
+// This function can return both a slice of Certificate and an error (in which
+// case the error will be of type NonFatalErrors).
+func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
+ var v []*certificate
+ var nfe NonFatalErrors
+
+ for len(asn1Data) > 0 {
+ cert := new(certificate)
+ var err error
+ asn1Data, err = asn1.Unmarshal(asn1Data, cert)
+ if err != nil {
+ var laxErr error
+ asn1Data, laxErr = asn1.UnmarshalWithParams(asn1Data, &cert, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ v = append(v, cert)
+ }
+
+ ret := make([]*Certificate, len(v))
+ for i, ci := range v {
+ cert, err := parseCertificate(ci, false)
+ if err != nil {
+ errs, ok := err.(NonFatalErrors)
+ if !ok {
+ return nil, err
+ }
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
+ }
+ ret[i] = cert
+ }
+
+ if nfe.HasError() {
+ return ret, nfe
+ }
+ return ret, nil
+}
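+
+// For illustration, a bundle of concatenated DER certificates can be parsed
+// in one call (sketch only; non-fatal errors handled as with
+// ParseCertificate):
+//
+//	chain, err := ParseCertificates(bundle)
+//	if IsFatal(err) {
+//		return err
+//	}
+//	for _, c := range chain {
+//		fmt.Printf("subject: %v\n", c.Subject)
+//	}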
+
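+// reverseBitsInAByte returns in with its bit order reversed: the
+// most-significant bit becomes the least-significant bit and vice versa.
+// This converts between the ASN.1 BIT STRING convention (bit 0 is the
+// most-significant bit) and the KeyUsage bitmask convention.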
+func reverseBitsInAByte(in byte) byte {
+ b1 := in>>4 | in<<4
+ b2 := b1>>2&0x33 | b1<<2&0xcc
+ b3 := b2>>1&0x55 | b2<<1&0xaa
+ return b3
+}
+
+// asn1BitLength returns the bit-length of bitString by considering the
+// most-significant bit in a byte to be the "first" bit. This convention
+// matches ASN.1, but differs from almost everything else.
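+// For example, asn1BitLength([]byte{0x80}) == 1 and
+// asn1BitLength([]byte{0xff, 0x80}) == 9, since trailing zero bits do not
+// count towards the length.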
+func asn1BitLength(bitString []byte) int {
+ bitLen := len(bitString) * 8
+
+ for i := range bitString {
+ b := bitString[len(bitString)-i-1]
+
+ for bit := uint(0); bit < 8; bit++ {
+ if (b>>bit)&1 == 1 {
+ return bitLen
+ }
+ bitLen--
+ }
+ }
+
+ return 0
+}
+
+// OID values for standard extensions from RFC 5280.
+var (
+ OIDExtensionArc = asn1.ObjectIdentifier{2, 5, 29} // id-ce RFC5280 s4.2.1
+ OIDExtensionSubjectKeyId = asn1.ObjectIdentifier{2, 5, 29, 14}
+ OIDExtensionKeyUsage = asn1.ObjectIdentifier{2, 5, 29, 15}
+ OIDExtensionExtendedKeyUsage = asn1.ObjectIdentifier{2, 5, 29, 37}
+ OIDExtensionAuthorityKeyId = asn1.ObjectIdentifier{2, 5, 29, 35}
+ OIDExtensionBasicConstraints = asn1.ObjectIdentifier{2, 5, 29, 19}
+ OIDExtensionSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17}
+ OIDExtensionCertificatePolicies = asn1.ObjectIdentifier{2, 5, 29, 32}
+ OIDExtensionNameConstraints = asn1.ObjectIdentifier{2, 5, 29, 30}
+ OIDExtensionCRLDistributionPoints = asn1.ObjectIdentifier{2, 5, 29, 31}
+ OIDExtensionIssuerAltName = asn1.ObjectIdentifier{2, 5, 29, 18}
+ OIDExtensionSubjectDirectoryAttributes = asn1.ObjectIdentifier{2, 5, 29, 9}
+ OIDExtensionInhibitAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 54}
+ OIDExtensionPolicyConstraints = asn1.ObjectIdentifier{2, 5, 29, 36}
+ OIDExtensionPolicyMappings = asn1.ObjectIdentifier{2, 5, 29, 33}
+ OIDExtensionFreshestCRL = asn1.ObjectIdentifier{2, 5, 29, 46}
+
+ OIDExtensionAuthorityInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1}
+ OIDExtensionSubjectInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11}
+
+ // OIDExtensionCTPoison is defined in RFC 6962 s3.1.
+ OIDExtensionCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
+ // OIDExtensionCTSCT is defined in RFC 6962 s3.3.
+ OIDExtensionCTSCT = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+ // OIDExtensionIPPrefixList is defined in RFC 3779 s2.
+ OIDExtensionIPPrefixList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 7}
+ // OIDExtensionASList is defined in RFC 3779 s3.
+ OIDExtensionASList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 8}
+)
+
+var (
+ OIDAuthorityInfoAccessOCSP = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
+ OIDAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
+ OIDSubjectInfoAccessTimestamp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 3}
+ OIDSubjectInfoAccessCARepo = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 5}
+ OIDAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 32, 0}
+)
+
+// oidInExtensions reports whether an extension with the given oid exists in
+// extensions.
+func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
+ for _, e := range extensions {
+ if e.Id.Equal(oid) {
+ return true
+ }
+ }
+ return false
+}
+
+// marshalSANs marshals a list of addresses into the contents of an X.509
+// SubjectAlternativeName extension.
+func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) (derBytes []byte, err error) {
+ var rawValues []asn1.RawValue
+ for _, name := range dnsNames {
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: asn1.ClassContextSpecific, Bytes: []byte(name)})
+ }
+ for _, email := range emailAddresses {
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: asn1.ClassContextSpecific, Bytes: []byte(email)})
+ }
+ for _, rawIP := range ipAddresses {
+ // If possible, we always want to encode IPv4 addresses in 4 bytes.
+ ip := rawIP.To4()
+ if ip == nil {
+ ip = rawIP
+ }
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: asn1.ClassContextSpecific, Bytes: ip})
+ }
+ for _, uri := range uris {
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: asn1.ClassContextSpecific, Bytes: []byte(uri.String())})
+ }
+ return asn1.Marshal(rawValues)
+}
+
+func isIA5String(s string) error {
+ for _, r := range s {
+ if r >= utf8.RuneSelf {
+ return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s)
+ }
+ }
+
+ return nil
+}
+
+func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte) (ret []pkix.Extension, err error) {
+ ret = make([]pkix.Extension, 12 /* maximum number of elements. */)
+ n := 0
+
+ if template.KeyUsage != 0 &&
+ !oidInExtensions(OIDExtensionKeyUsage, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionKeyUsage
+ ret[n].Critical = true
+
+ var a [2]byte
+ a[0] = reverseBitsInAByte(byte(template.KeyUsage))
+ a[1] = reverseBitsInAByte(byte(template.KeyUsage >> 8))
+
+ l := 1
+ if a[1] != 0 {
+ l = 2
+ }
+
+ bitString := a[:l]
+ ret[n].Value, err = asn1.Marshal(asn1.BitString{Bytes: bitString, BitLength: asn1BitLength(bitString)})
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) &&
+ !oidInExtensions(OIDExtensionExtendedKeyUsage, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionExtendedKeyUsage
+
+ var oids []asn1.ObjectIdentifier
+ for _, u := range template.ExtKeyUsage {
+ if oid, ok := oidFromExtKeyUsage(u); ok {
+ oids = append(oids, oid)
+ } else {
+ panic("internal error")
+ }
+ }
+
+ oids = append(oids, template.UnknownExtKeyUsage...)
+
+ ret[n].Value, err = asn1.Marshal(oids)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if template.BasicConstraintsValid && !oidInExtensions(OIDExtensionBasicConstraints, template.ExtraExtensions) {
+ // Leaving MaxPathLen as zero indicates that no maximum path
+ // length is desired, unless MaxPathLenZero is set. A value of
+ // -1 causes encoding/asn1 to omit the value as desired.
+ maxPathLen := template.MaxPathLen
+ if maxPathLen == 0 && !template.MaxPathLenZero {
+ maxPathLen = -1
+ }
+ ret[n].Id = OIDExtensionBasicConstraints
+ ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, maxPathLen})
+ ret[n].Critical = true
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(template.SubjectKeyId) > 0 && !oidInExtensions(OIDExtensionSubjectKeyId, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionSubjectKeyId
+ ret[n].Value, err = asn1.Marshal(template.SubjectKeyId)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(authorityKeyId) > 0 && !oidInExtensions(OIDExtensionAuthorityKeyId, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionAuthorityKeyId
+ ret[n].Value, err = asn1.Marshal(authKeyId{authorityKeyId})
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
+ !oidInExtensions(OIDExtensionAuthorityInfoAccess, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionAuthorityInfoAccess
+ var aiaValues []accessDescription
+ for _, name := range template.OCSPServer {
+ aiaValues = append(aiaValues, accessDescription{
+ Method: OIDAuthorityInfoAccessOCSP,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
+ })
+ }
+ for _, name := range template.IssuingCertificateURL {
+ aiaValues = append(aiaValues, accessDescription{
+ Method: OIDAuthorityInfoAccessIssuers,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
+ })
+ }
+ ret[n].Value, err = asn1.Marshal(aiaValues)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.SubjectTimestamps) > 0 || len(template.SubjectCARepositories) > 0) &&
+ !oidInExtensions(OIDExtensionSubjectInfoAccess, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionSubjectInfoAccess
+ var siaValues []accessDescription
+ for _, ts := range template.SubjectTimestamps {
+ siaValues = append(siaValues, accessDescription{
+ Method: OIDSubjectInfoAccessTimestamp,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(ts)},
+ })
+ }
+ for _, repo := range template.SubjectCARepositories {
+ siaValues = append(siaValues, accessDescription{
+ Method: OIDSubjectInfoAccessCARepo,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(repo)},
+ })
+ }
+ ret[n].Value, err = asn1.Marshal(siaValues)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
+ !oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionSubjectAltName
+ // From RFC 5280, Section 4.2.1.6:
+ // “If the subject field contains an empty sequence ... then
+ // subjectAltName extension ... is marked as critical”
+ ret[n].Critical = subjectIsEmpty
+ ret[n].Value, err = marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(template.PolicyIdentifiers) > 0 &&
+ !oidInExtensions(OIDExtensionCertificatePolicies, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionCertificatePolicies
+ policies := make([]policyInformation, len(template.PolicyIdentifiers))
+ for i, policy := range template.PolicyIdentifiers {
+ policies[i].Policy = policy
+ }
+ ret[n].Value, err = asn1.Marshal(policies)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.PermittedDNSDomains) > 0 || len(template.ExcludedDNSDomains) > 0 ||
+ len(template.PermittedIPRanges) > 0 || len(template.ExcludedIPRanges) > 0 ||
+ len(template.PermittedEmailAddresses) > 0 || len(template.ExcludedEmailAddresses) > 0 ||
+ len(template.PermittedURIDomains) > 0 || len(template.ExcludedURIDomains) > 0) &&
+ !oidInExtensions(OIDExtensionNameConstraints, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionNameConstraints
+ ret[n].Critical = template.PermittedDNSDomainsCritical
+
+ ipAndMask := func(ipNet *net.IPNet) []byte {
+ maskedIP := ipNet.IP.Mask(ipNet.Mask)
+ ipAndMask := make([]byte, 0, len(maskedIP)+len(ipNet.Mask))
+ ipAndMask = append(ipAndMask, maskedIP...)
+ ipAndMask = append(ipAndMask, ipNet.Mask...)
+ return ipAndMask
+ }
+
+ serialiseConstraints := func(dns []string, ips []*net.IPNet, emails []string, uriDomains []string) (der []byte, err error) {
+ var b cryptobyte.Builder
+
+ for _, name := range dns {
+ if err = isIA5String(name); err != nil {
+ return nil, err
+ }
+
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1(cryptobyte_asn1.Tag(2).ContextSpecific(), func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(name))
+ })
+ })
+ }
+
+ for _, ipNet := range ips {
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1(cryptobyte_asn1.Tag(7).ContextSpecific(), func(b *cryptobyte.Builder) {
+ b.AddBytes(ipAndMask(ipNet))
+ })
+ })
+ }
+
+ for _, email := range emails {
+ if err = isIA5String(email); err != nil {
+ return nil, err
+ }
+
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific(), func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(email))
+ })
+ })
+ }
+
+ for _, uriDomain := range uriDomains {
+ if err = isIA5String(uriDomain); err != nil {
+ return nil, err
+ }
+
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1(cryptobyte_asn1.Tag(6).ContextSpecific(), func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(uriDomain))
+ })
+ })
+ }
+
+ return b.Bytes()
+ }
+
+ permitted, err := serialiseConstraints(template.PermittedDNSDomains, template.PermittedIPRanges, template.PermittedEmailAddresses, template.PermittedURIDomains)
+ if err != nil {
+ return nil, err
+ }
+
+ excluded, err := serialiseConstraints(template.ExcludedDNSDomains, template.ExcludedIPRanges, template.ExcludedEmailAddresses, template.ExcludedURIDomains)
+ if err != nil {
+ return nil, err
+ }
+
+ var b cryptobyte.Builder
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ if len(permitted) > 0 {
+ b.AddASN1(cryptobyte_asn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) {
+ b.AddBytes(permitted)
+ })
+ }
+
+ if len(excluded) > 0 {
+ b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) {
+ b.AddBytes(excluded)
+ })
+ }
+ })
+
+ ret[n].Value, err = b.Bytes()
+ if err != nil {
+ return nil, err
+ }
+ n++
+ }
+
+ if len(template.CRLDistributionPoints) > 0 &&
+ !oidInExtensions(OIDExtensionCRLDistributionPoints, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionCRLDistributionPoints
+
+ var crlDp []distributionPoint
+ for _, name := range template.CRLDistributionPoints {
+ dp := distributionPoint{
+ DistributionPoint: distributionPointName{
+ FullName: []asn1.RawValue{
+ {Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
+ },
+ },
+ }
+ crlDp = append(crlDp, dp)
+ }
+
+ ret[n].Value, err = asn1.Marshal(crlDp)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.RawSCT) > 0 || len(template.SCTList.SCTList) > 0) && !oidInExtensions(OIDExtensionCTSCT, template.ExtraExtensions) {
+ rawSCT := template.RawSCT
+ if len(template.SCTList.SCTList) > 0 {
+ rawSCT, err = tls.Marshal(template.SCTList)
+ if err != nil {
+ return
+ }
+ }
+ ret[n].Id = OIDExtensionCTSCT
+ ret[n].Value, err = asn1.Marshal(rawSCT)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ // Adding another extension here? Remember to update the maximum number
+ // of elements in the make() at the top of the function and the list of
+ // template fields used in CreateCertificate documentation.
+
+ return append(ret[:n], template.ExtraExtensions...), nil
+}
+
+func subjectBytes(cert *Certificate) ([]byte, error) {
+ if len(cert.RawSubject) > 0 {
+ return cert.RawSubject, nil
+ }
+
+ return asn1.Marshal(cert.Subject.ToRDNSequence())
+}
+
+// signingParamsForPublicKey returns the parameters to use for signing with
+// priv. If requestedSigAlgo is not zero then it overrides the default
+// signature algorithm.
+func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
+ var pubType PublicKeyAlgorithm
+
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ pubType = RSA
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureSHA256WithRSA
+ sigAlgo.Parameters = asn1.NullRawValue
+
+ case *ecdsa.PublicKey:
+ pubType = ECDSA
+
+ switch pub.Curve {
+ case elliptic.P224(), elliptic.P256():
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
+ case elliptic.P384():
+ hashFunc = crypto.SHA384
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
+ case elliptic.P521():
+ hashFunc = crypto.SHA512
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
+ default:
+ err = errors.New("x509: unknown elliptic curve")
+ }
+
+ case ed25519.PublicKey:
+ pubType = Ed25519
+ sigAlgo.Algorithm = oidSignatureEd25519
+
+ default:
+ err = errors.New("x509: only RSA, ECDSA and Ed25519 keys supported")
+ }
+
+ if err != nil {
+ return
+ }
+
+ if requestedSigAlgo == 0 {
+ return
+ }
+
+ found := false
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == requestedSigAlgo {
+ if details.pubKeyAlgo != pubType {
+ err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
+ return
+ }
+ sigAlgo.Algorithm, hashFunc = details.oid, details.hash
+ if hashFunc == 0 && pubType != Ed25519 {
+ err = errors.New("x509: cannot sign with hash function requested")
+ return
+ }
+ if requestedSigAlgo.isRSAPSS() {
+ sigAlgo.Parameters = rsaPSSParameters(hashFunc)
+ }
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ err = errors.New("x509: unknown SignatureAlgorithm")
+ }
+
+ return
+}
+
+// emptyASN1Subject is the ASN.1 DER encoding of an empty Subject, which is
+// just an empty SEQUENCE.
+var emptyASN1Subject = []byte{0x30, 0}
+
+// CreateCertificate creates a new X.509v3 certificate based on a template.
+// The following members of template are used:
+// - SerialNumber
+// - Subject
+// - NotBefore, NotAfter
+// - SignatureAlgorithm
+// - For extensions:
+// - KeyUsage
+// - ExtKeyUsage, UnknownExtKeyUsage
+// - BasicConstraintsValid, IsCA, MaxPathLen, MaxPathLenZero
+// - SubjectKeyId
+// - AuthorityKeyId
+// - OCSPServer, IssuingCertificateURL
+// - SubjectTimestamps, SubjectCARepositories
+// - DNSNames, EmailAddresses, IPAddresses, URIs
+// - PolicyIdentifiers
+// - ExcludedDNSDomains, ExcludedIPRanges, ExcludedEmailAddresses, ExcludedURIDomains, PermittedDNSDomainsCritical,
+// PermittedDNSDomains, PermittedIPRanges, PermittedEmailAddresses, PermittedURIDomains
+// - CRLDistributionPoints
+// - RawSCT, SCTList
+// - ExtraExtensions
+//
+// The certificate is signed by parent. If parent is equal to template then the
+// certificate is self-signed. The parameter pub is the public key of the
+// signee and priv is the private key of the signer.
+//
+// The returned slice is the certificate in DER encoding.
+//
+// The currently supported key types are *rsa.PublicKey, *ecdsa.PublicKey and
+// ed25519.PublicKey. pub must be a supported key type, and priv must be a
+// crypto.Signer with a supported public key.
+//
+// The AuthorityKeyId will be taken from the SubjectKeyId of parent, if any,
+// unless the resulting certificate is self-signed. Otherwise the value from
+// template will be used.
+func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv interface{}) (cert []byte, err error) {
+ key, ok := priv.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+ }
+
+ if template.SerialNumber == nil {
+ return nil, errors.New("x509: no SerialNumber given")
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ publicKeyBytes, publicKeyAlgorithm, err := marshalPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+
+ asn1Issuer, err := subjectBytes(parent)
+ if err != nil {
+ return
+ }
+
+ asn1Subject, err := subjectBytes(template)
+ if err != nil {
+ return
+ }
+
+ authorityKeyId := template.AuthorityKeyId
+ if !bytes.Equal(asn1Issuer, asn1Subject) && len(parent.SubjectKeyId) > 0 {
+ authorityKeyId = parent.SubjectKeyId
+ }
+
+ extensions, err := buildExtensions(template, bytes.Equal(asn1Subject, emptyASN1Subject), authorityKeyId)
+ if err != nil {
+ return
+ }
+
+ encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes}
+ c := tbsCertificate{
+ Version: 2,
+ SerialNumber: template.SerialNumber,
+ SignatureAlgorithm: signatureAlgorithm,
+ Issuer: asn1.RawValue{FullBytes: asn1Issuer},
+ Validity: validity{template.NotBefore.UTC(), template.NotAfter.UTC()},
+ Subject: asn1.RawValue{FullBytes: asn1Subject},
+ PublicKey: publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey},
+ Extensions: extensions,
+ }
+
+ tbsCertContents, err := asn1.Marshal(c)
+ if err != nil {
+ return
+ }
+ c.Raw = tbsCertContents
+
+ signed := tbsCertContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
+
+ var signerOpts crypto.SignerOpts = hashFunc
+ if template.SignatureAlgorithm != 0 && template.SignatureAlgorithm.isRSAPSS() {
+ signerOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ Hash: hashFunc,
+ }
+ }
+
+ var signature []byte
+ signature, err = key.Sign(rand, signed, signerOpts)
+ if err != nil {
+ return
+ }
+
+ return asn1.Marshal(certificate{
+ nil,
+ c,
+ signatureAlgorithm,
+ asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+ })
+}
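+
+// For illustration, a self-signed certificate can be created along these
+// lines (sketch only; error handling elided):
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	tmpl := &Certificate{
+//		SerialNumber: big.NewInt(1),
+//		Subject:      pkix.Name{CommonName: "example"},
+//		NotBefore:    time.Now(),
+//		NotAfter:     time.Now().Add(24 * time.Hour),
+//	}
+//	der, _ := CreateCertificate(rand.Reader, tmpl, tmpl, priv.Public(), priv)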
+
+// pemCRLPrefix is the magic string that indicates that we have a PEM encoded
+// CRL.
+var pemCRLPrefix = []byte("-----BEGIN X509 CRL")
+
+// pemType is the type of a PEM encoded CRL.
+var pemType = "X509 CRL"
+
+// ParseCRL parses a CRL from the given bytes. It's often the case that PEM
+// encoded CRLs will appear where they should be DER encoded, so this function
+// will transparently handle PEM encoding as long as there isn't any leading
+// garbage.
+func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) {
+ if bytes.HasPrefix(crlBytes, pemCRLPrefix) {
+ block, _ := pem.Decode(crlBytes)
+ if block != nil && block.Type == pemType {
+ crlBytes = block.Bytes
+ }
+ }
+ return ParseDERCRL(crlBytes)
+}
+
+// ParseDERCRL parses a DER encoded CRL from the given bytes.
+func ParseDERCRL(derBytes []byte) (*pkix.CertificateList, error) {
+ certList := new(pkix.CertificateList)
+ if rest, err := asn1.Unmarshal(derBytes, certList); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after CRL")
+ }
+ return certList, nil
+}
+
+// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
+// contains the given list of revoked certificates.
+func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
+ key, ok := priv.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // Force revocation times to UTC per RFC 5280.
+ revokedCertsUTC := make([]pkix.RevokedCertificate, len(revokedCerts))
+ for i, rc := range revokedCerts {
+ rc.RevocationTime = rc.RevocationTime.UTC()
+ revokedCertsUTC[i] = rc
+ }
+
+ tbsCertList := pkix.TBSCertificateList{
+ Version: 1,
+ Signature: signatureAlgorithm,
+ Issuer: c.Subject.ToRDNSequence(),
+ ThisUpdate: now.UTC(),
+ NextUpdate: expiry.UTC(),
+ RevokedCertificates: revokedCertsUTC,
+ }
+
+ // Authority Key Id
+ if len(c.SubjectKeyId) > 0 {
+ var aki pkix.Extension
+ aki.Id = OIDExtensionAuthorityKeyId
+ aki.Value, err = asn1.Marshal(authKeyId{Id: c.SubjectKeyId})
+ if err != nil {
+ return
+ }
+ tbsCertList.Extensions = append(tbsCertList.Extensions, aki)
+ }
+
+ tbsCertListContents, err := asn1.Marshal(tbsCertList)
+ if err != nil {
+ return
+ }
+
+ signed := tbsCertListContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
+
+ var signature []byte
+ signature, err = key.Sign(rand, signed, hashFunc)
+ if err != nil {
+ return
+ }
+
+ return asn1.Marshal(pkix.CertificateList{
+ TBSCertList: tbsCertList,
+ SignatureAlgorithm: signatureAlgorithm,
+ SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+ })
+}
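+
+// For illustration (sketch only; caCert is an issuing *Certificate and caKey
+// its crypto.Signer):
+//
+//	revoked := []pkix.RevokedCertificate{{
+//		SerialNumber:   big.NewInt(42),
+//		RevocationTime: time.Now(),
+//	}}
+//	crlDER, _ := caCert.CreateCRL(rand.Reader, caKey, revoked,
+//		time.Now(), time.Now().Add(24*time.Hour))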
+
+// CertificateRequest represents a PKCS #10 certificate signature request.
+type CertificateRequest struct {
+ Raw []byte // Complete ASN.1 DER content (CSR, signature algorithm and signature).
+ RawTBSCertificateRequest []byte // Certificate request info part of raw ASN.1 DER content.
+ RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
+ RawSubject []byte // DER encoded Subject.
+
+ Version int
+ Signature []byte
+ SignatureAlgorithm SignatureAlgorithm
+
+ PublicKeyAlgorithm PublicKeyAlgorithm
+ PublicKey interface{}
+
+ Subject pkix.Name
+
+ // Attributes contains the CSR attributes that can be parsed as
+ // pkix.AttributeTypeAndValueSET.
+ //
+ // Deprecated: Use Extensions and ExtraExtensions instead for parsing and
+ // generating the requestedExtensions attribute.
+ Attributes []pkix.AttributeTypeAndValueSET
+
+ // Extensions contains all requested extensions, in raw form. When parsing
+ // CSRs, this can be used to extract extensions that are not parsed by this
+ // package.
+ Extensions []pkix.Extension
+
+ // ExtraExtensions contains extensions to be copied, raw, into any CSR
+ // marshaled by CreateCertificateRequest. Values override any extensions
+ // that would otherwise be produced based on the other fields but are
+ // overridden by any extensions specified in Attributes.
+ //
+ // The ExtraExtensions field is not populated by ParseCertificateRequest,
+ // see Extensions instead.
+ ExtraExtensions []pkix.Extension
+
+ // Subject Alternate Name values.
+ DNSNames []string
+ EmailAddresses []string
+ IPAddresses []net.IP
+ URIs []*url.URL
+}
+
+// These structures reflect the ASN.1 structure of X.509 certificate
+// signature requests (see RFC 2986):
+
+type tbsCertificateRequest struct {
+ Raw asn1.RawContent
+ Version int
+ Subject asn1.RawValue
+ PublicKey publicKeyInfo
+ RawAttributes []asn1.RawValue `asn1:"tag:0"`
+}
+
+type certificateRequest struct {
+ Raw asn1.RawContent
+ TBSCSR tbsCertificateRequest
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+// oidExtensionRequest is a PKCS#9 OBJECT IDENTIFIER that indicates requested
+// extensions in a CSR.
+var oidExtensionRequest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 14}
+
+// newRawAttributes converts AttributeTypeAndValueSETs from a template
+// CertificateRequest's Attributes into tbsCertificateRequest RawAttributes.
+func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawValue, error) {
+ var rawAttributes []asn1.RawValue
+ b, err := asn1.Marshal(attributes)
+ if err != nil {
+ return nil, err
+ }
+ rest, err := asn1.Unmarshal(b, &rawAttributes)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: failed to unmarshal raw CSR Attributes")
+ }
+ return rawAttributes, nil
+}
+
+// parseRawAttributes unmarshals RawAttributes into AttributeTypeAndValueSETs.
+func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET {
+ var attributes []pkix.AttributeTypeAndValueSET
+ for _, rawAttr := range rawAttributes {
+ var attr pkix.AttributeTypeAndValueSET
+ rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr)
+ // Ignore attributes that don't parse into pkix.AttributeTypeAndValueSET
+ // (i.e.: challengePassword or unstructuredName).
+ if err == nil && len(rest) == 0 {
+ attributes = append(attributes, attr)
+ }
+ }
+ return attributes
+}
+
+// parseCSRExtensions parses the attributes from a CSR and extracts any
+// requested extensions.
+func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) {
+ // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1.
+ type pkcs10Attribute struct {
+ Id asn1.ObjectIdentifier
+ Values []asn1.RawValue `asn1:"set"`
+ }
+
+ var ret []pkix.Extension
+ for _, rawAttr := range rawAttributes {
+ var attr pkcs10Attribute
+ if rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr); err != nil || len(rest) != 0 || len(attr.Values) == 0 {
+ // Ignore attributes that don't parse.
+ continue
+ }
+
+ if !attr.Id.Equal(oidExtensionRequest) {
+ continue
+ }
+
+ var extensions []pkix.Extension
+ if _, err := asn1.Unmarshal(attr.Values[0].FullBytes, &extensions); err != nil {
+ return nil, err
+ }
+ ret = append(ret, extensions...)
+ }
+
+ return ret, nil
+}
+
+// CreateCertificateRequest creates a new certificate request based on a
+// template. The following members of template are used:
+//
+// - SignatureAlgorithm
+// - Subject
+// - DNSNames
+// - EmailAddresses
+// - IPAddresses
+// - URIs
+// - ExtraExtensions
+// - Attributes (deprecated)
+//
+// priv is the private key to sign the CSR with, and the corresponding public
+// key will be included in the CSR. It must implement crypto.Signer and its
+// Public() method must return a *rsa.PublicKey, a *ecdsa.PublicKey or an
+// ed25519.PublicKey. (A *rsa.PrivateKey, *ecdsa.PrivateKey or
+// ed25519.PrivateKey satisfies this.)
+//
+// The returned slice is the certificate request in DER encoding.
+func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv interface{}) (csr []byte, err error) {
+ key, ok := priv.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+ }
+
+ var hashFunc crypto.Hash
+ var sigAlgo pkix.AlgorithmIdentifier
+ hashFunc, sigAlgo, err = signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ var publicKeyBytes []byte
+ var publicKeyAlgorithm pkix.AlgorithmIdentifier
+ publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(key.Public())
+ if err != nil {
+ return nil, err
+ }
+
+ var extensions []pkix.Extension
+
+ if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
+ !oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) {
+ sanBytes, err := marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs)
+ if err != nil {
+ return nil, err
+ }
+
+ extensions = append(extensions, pkix.Extension{
+ Id: OIDExtensionSubjectAltName,
+ Value: sanBytes,
+ })
+ }
+
+ extensions = append(extensions, template.ExtraExtensions...)
+
+ // Make a copy of template.Attributes because we may alter it below.
+ attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes))
+ for _, attr := range template.Attributes {
+ values := make([][]pkix.AttributeTypeAndValue, len(attr.Value))
+ copy(values, attr.Value)
+ attributes = append(attributes, pkix.AttributeTypeAndValueSET{
+ Type: attr.Type,
+ Value: values,
+ })
+ }
+
+ extensionsAppended := false
+ if len(extensions) > 0 {
+ // Append the extensions to an existing attribute if possible.
+ for _, atvSet := range attributes {
+ if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 {
+ continue
+ }
+
+ // specifiedExtensions contains all the extensions that we
+ // found specified via template.Attributes.
+ specifiedExtensions := make(map[string]bool)
+
+ for _, atvs := range atvSet.Value {
+ for _, atv := range atvs {
+ specifiedExtensions[atv.Type.String()] = true
+ }
+ }
+
+ newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions))
+ newValue = append(newValue, atvSet.Value[0]...)
+
+ for _, e := range extensions {
+ if specifiedExtensions[e.Id.String()] {
+ // Attributes already contained a value for
+ // this extension and it takes priority.
+ continue
+ }
+
+ newValue = append(newValue, pkix.AttributeTypeAndValue{
+ // There is no place for the critical
+ // flag in an AttributeTypeAndValue.
+ Type: e.Id,
+ Value: e.Value,
+ })
+ }
+
+ atvSet.Value[0] = newValue
+ extensionsAppended = true
+ break
+ }
+ }
+
+ rawAttributes, err := newRawAttributes(attributes)
+ if err != nil {
+ return
+ }
+
+ // If not included in attributes, add a new attribute for the
+ // extensions.
+ if len(extensions) > 0 && !extensionsAppended {
+ attr := struct {
+ Type asn1.ObjectIdentifier
+ Value [][]pkix.Extension `asn1:"set"`
+ }{
+ Type: oidExtensionRequest,
+ Value: [][]pkix.Extension{extensions},
+ }
+
+ b, err := asn1.Marshal(attr)
+ if err != nil {
+ return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error())
+ }
+
+ var rawValue asn1.RawValue
+ if _, err := asn1.Unmarshal(b, &rawValue); err != nil {
+ return nil, err
+ }
+
+ rawAttributes = append(rawAttributes, rawValue)
+ }
+
+ asn1Subject := template.RawSubject
+ if len(asn1Subject) == 0 {
+ asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence())
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ tbsCSR := tbsCertificateRequest{
+ Version: 0, // PKCS #10, RFC 2986
+ Subject: asn1.RawValue{FullBytes: asn1Subject},
+ PublicKey: publicKeyInfo{
+ Algorithm: publicKeyAlgorithm,
+ PublicKey: asn1.BitString{
+ Bytes: publicKeyBytes,
+ BitLength: len(publicKeyBytes) * 8,
+ },
+ },
+ RawAttributes: rawAttributes,
+ }
+
+ tbsCSRContents, err := asn1.Marshal(tbsCSR)
+ if err != nil {
+ return
+ }
+ tbsCSR.Raw = tbsCSRContents
+
+ signed := tbsCSRContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
+
+ var signature []byte
+ signature, err = key.Sign(rand, signed, hashFunc)
+ if err != nil {
+ return
+ }
+
+ return asn1.Marshal(certificateRequest{
+ TBSCSR: tbsCSR,
+ SignatureAlgorithm: sigAlgo,
+ SignatureValue: asn1.BitString{
+ Bytes: signature,
+ BitLength: len(signature) * 8,
+ },
+ })
+}
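+
+// For illustration (sketch only; error handling elided):
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	tmpl := &CertificateRequest{
+//		Subject:  pkix.Name{CommonName: "example.com"},
+//		DNSNames: []string{"example.com", "www.example.com"},
+//	}
+//	csrDER, _ := CreateCertificateRequest(rand.Reader, tmpl, priv)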
+
+// ParseCertificateRequest parses a single certificate request from the
+// given ASN.1 DER data.
+func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) {
+ var csr certificateRequest
+
+ rest, err := asn1.Unmarshal(asn1Data, &csr)
+ if err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+
+ return parseCertificateRequest(&csr)
+}
+
+func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) {
+ out := &CertificateRequest{
+ Raw: in.Raw,
+ RawTBSCertificateRequest: in.TBSCSR.Raw,
+ RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw,
+ RawSubject: in.TBSCSR.Subject.FullBytes,
+
+ Signature: in.SignatureValue.RightAlign(),
+ SignatureAlgorithm: SignatureAlgorithmFromAI(in.SignatureAlgorithm),
+
+ PublicKeyAlgorithm: getPublicKeyAlgorithmFromOID(in.TBSCSR.PublicKey.Algorithm.Algorithm),
+
+ Version: in.TBSCSR.Version,
+ Attributes: parseRawAttributes(in.TBSCSR.RawAttributes),
+ }
+
+ var err error
+ var nfe NonFatalErrors
+ out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey, &nfe)
+ if err != nil {
+ return nil, err
+ }
+ // Treat non-fatal errors as fatal here.
+ if len(nfe.Errors) > 0 {
+ return nil, nfe.Errors[0]
+ }
+
+ var subject pkix.RDNSequence
+ if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 Subject")
+ }
+
+ out.Subject.FillFromRDNSequence(&subject)
+
+ if out.Extensions, err = parseCSRExtensions(in.TBSCSR.RawAttributes); err != nil {
+ return nil, err
+ }
+
+ for _, extension := range out.Extensions {
+ if extension.Id.Equal(OIDExtensionSubjectAltName) {
+ out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value, &nfe)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return out, nil
+}
+
+// CheckSignature reports whether the signature on c is valid.
+func (c *CertificateRequest) CheckSignature() error {
+ return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificateRequest, c.Signature, c.PublicKey)
+}
diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/files.go b/vendor/github.com/google/certificate-transparency-go/x509util/files.go
new file mode 100644
index 00000000000..823ac7375a9
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509util/files.go
@@ -0,0 +1,116 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package x509util
+
+import (
+ "encoding/pem"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/google/certificate-transparency-go/x509"
+)
+
+// ReadPossiblePEMFile loads data from a file which may be in DER format
+// or may be in PEM format (with the given blockname).
+func ReadPossiblePEMFile(filename, blockname string) ([][]byte, error) {
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, fmt.Errorf("%s: failed to read data: %v", filename, err)
+ }
+ return dePEM(data, blockname), nil
+}
+
+// ReadPossiblePEMURL attempts to determine if the given target is a local file or a
+// URL, and return the file contents regardless. It also copes with either PEM or DER
+// format data.
+func ReadPossiblePEMURL(target, blockname string) ([][]byte, error) {
+ if !strings.HasPrefix(target, "http://") && !strings.HasPrefix(target, "https://") {
+ // Assume it's a filename
+ return ReadPossiblePEMFile(target, blockname)
+ }
+
+ rsp, err := http.Get(target)
+ if err != nil {
+ return nil, fmt.Errorf("failed to http.Get(%q): %v", target, err)
+ }
+ data, err := io.ReadAll(rsp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to io.ReadAll(%q): %v", target, err)
+ }
+ return dePEM(data, blockname), nil
+}
+
+func dePEM(data []byte, blockname string) [][]byte {
+ var results [][]byte
+ if strings.Contains(string(data), "BEGIN "+blockname) {
+ rest := data
+ for {
+ var block *pem.Block
+ block, rest = pem.Decode(rest)
+ if block == nil {
+ break
+ }
+ if block.Type == blockname {
+ results = append(results, block.Bytes)
+ }
+ }
+ } else {
+ results = append(results, data)
+ }
+ return results
+}
+
+// ReadFileOrURL returns the data from a target which may be either a filename
+// or an HTTP(S) URL.
+func ReadFileOrURL(target string, client *http.Client) ([]byte, error) {
+ u, err := url.Parse(target)
+ if err != nil || (u.Scheme != "http" && u.Scheme != "https") {
+ return os.ReadFile(target)
+ }
+
+ rsp, err := client.Get(u.String())
+ if err != nil {
+ return nil, fmt.Errorf("failed to http.Get(%q): %v", target, err)
+ }
+ defer rsp.Body.Close()
+ return io.ReadAll(rsp.Body)
+}
+
+// GetIssuer attempts to retrieve the issuer for a certificate, by examining
+// the cert's Authority Information Access extension (if present) for the
+// issuer's URL and retrieving from there.
+func GetIssuer(cert *x509.Certificate, client *http.Client) (*x509.Certificate, error) {
+ if len(cert.IssuingCertificateURL) == 0 {
+ return nil, nil
+ }
+ issuerURL := cert.IssuingCertificateURL[0]
+ rsp, err := client.Get(issuerURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get issuer from %q: %v", issuerURL, err)
+ }
+ defer rsp.Body.Close()
+ if rsp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("failed to get issuer from %q: HTTP status %q", issuerURL, rsp.Status)
+ }
+ body, err := io.ReadAll(rsp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read issuer from %q: %v", issuerURL, err)
+ }
+ issuers, err := x509.ParseCertificates(body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse issuer cert: %v", err)
+ }
+ return issuers[0], nil
+}
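A minimal sketch of how these helpers compose, assuming a local `leaf.pem` and `http.DefaultClient`:

```go
package main

import (
	"log"
	"net/http"

	"github.com/google/certificate-transparency-go/x509"
	"github.com/google/certificate-transparency-go/x509util"
)

func main() {
	// ReadPossiblePEMFile copes with both PEM bundles and raw DER.
	ders, err := x509util.ReadPossiblePEMFile("leaf.pem", "CERTIFICATE") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	leaf, err := x509.ParseCertificate(ders[0])
	if err != nil {
		log.Fatal(err)
	}
	// Follow the AIA extension (if any) to fetch the issuer certificate.
	issuer, err := x509util.GetIssuer(leaf, http.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}
	if issuer == nil {
		log.Print("leaf names no issuer URL")
		return
	}
	log.Printf("issuer subject: %v", issuer.Subject)
}
```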
diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go b/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go
new file mode 100644
index 00000000000..c21bd650589
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go
@@ -0,0 +1,118 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package x509util
+
+import (
+ "crypto/sha256"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/google/certificate-transparency-go/x509"
+)
+
+// String for certificate blocks in BEGIN / END PEM headers
+const pemCertificateBlockType string = "CERTIFICATE"
+
+// PEMCertPool is a wrapper / extension to x509.CertPool. It allows access to
+// the raw certs, which are needed to serve get-roots requests, and it is
+// stricter when loading certs into the pool: CertPool ignores errors as long
+// as at least one cert loads correctly, but PEMCertPool requires all certs to load.
+type PEMCertPool struct {
+ // maps from sha-256 to certificate, used for dup detection
+ fingerprintToCertMap map[[sha256.Size]byte]x509.Certificate
+ rawCerts []*x509.Certificate
+ certPool *x509.CertPool
+}
+
+// NewPEMCertPool creates a new, empty, instance of PEMCertPool.
+func NewPEMCertPool() *PEMCertPool {
+ return &PEMCertPool{fingerprintToCertMap: make(map[[sha256.Size]byte]x509.Certificate), certPool: x509.NewCertPool()}
+}
+
+// AddCert adds a certificate to a pool. Uses fingerprint to weed out duplicates.
+// cert must not be nil.
+func (p *PEMCertPool) AddCert(cert *x509.Certificate) {
+ fingerprint := sha256.Sum256(cert.Raw)
+ _, ok := p.fingerprintToCertMap[fingerprint]
+
+ if !ok {
+ p.fingerprintToCertMap[fingerprint] = *cert
+ p.certPool.AddCert(cert)
+ p.rawCerts = append(p.rawCerts, cert)
+ }
+}
+
+// Included indicates whether the given cert is included in the pool.
+func (p *PEMCertPool) Included(cert *x509.Certificate) bool {
+ fingerprint := sha256.Sum256(cert.Raw)
+ _, ok := p.fingerprintToCertMap[fingerprint]
+ return ok
+}
+
+// AppendCertsFromPEM adds certs to the pool from a byte slice assumed to contain PEM-encoded data,
+// skipping over any non-certificate blocks. It returns true if all certificates in the
+// data were parsed and added to the pool successfully and at least one certificate was found.
+func (p *PEMCertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
+ for len(pemCerts) > 0 {
+ var block *pem.Block
+ block, pemCerts = pem.Decode(pemCerts)
+ if block == nil {
+ break
+ }
+ if block.Type != pemCertificateBlockType || len(block.Headers) != 0 {
+ continue
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if x509.IsFatal(err) {
+ return false
+ }
+
+ p.AddCert(cert)
+ ok = true
+ }
+
+ return
+}
+
+// AppendCertsFromPEMFile adds certs from a file that contains concatenated PEM data.
+func (p *PEMCertPool) AppendCertsFromPEMFile(pemFile string) error {
+ pemData, err := os.ReadFile(pemFile)
+ if err != nil {
+ return fmt.Errorf("failed to load PEM certs file: %v", err)
+ }
+
+ if !p.AppendCertsFromPEM(pemData) {
+ return errors.New("failed to parse PEM certs file")
+ }
+ return nil
+}
+
+// Subjects returns a list of the DER-encoded subjects of all of the certificates in the pool.
+func (p *PEMCertPool) Subjects() (res [][]byte) {
+ return p.certPool.Subjects()
+}
+
+// CertPool returns the underlying CertPool.
+func (p *PEMCertPool) CertPool() *x509.CertPool {
+ return p.certPool
+}
+
+// RawCertificates returns the parsed certificates in this pool, in the order they were added.
+func (p *PEMCertPool) RawCertificates() []*x509.Certificate {
+ return p.rawCerts
+}
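A short usage sketch, assuming a `roots.pem` bundle on disk:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/x509util"
)

func main() {
	pool := x509util.NewPEMCertPool()
	// Unlike x509.CertPool, this fails if any block in the bundle is bad.
	if err := pool.AppendCertsFromPEMFile("roots.pem"); err != nil { // hypothetical path
		log.Fatalf("failed to load roots: %v", err)
	}
	// Insertion order is preserved, which suits get-roots responses.
	for _, cert := range pool.RawCertificates() {
		fmt.Println(x509util.NameToString(cert.Subject))
	}
}
```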
diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/revoked.go b/vendor/github.com/google/certificate-transparency-go/x509util/revoked.go
new file mode 100644
index 00000000000..e0927292630
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509util/revoked.go
@@ -0,0 +1,169 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package x509util
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "strconv"
+
+ "github.com/google/certificate-transparency-go/x509"
+ "github.com/google/certificate-transparency-go/x509/pkix"
+)
+
+// RevocationReasonToString generates a string describing a revocation reason code.
+func RevocationReasonToString(reason x509.RevocationReasonCode) string {
+ switch reason {
+ case x509.Unspecified:
+ return "Unspecified"
+ case x509.KeyCompromise:
+ return "Key Compromise"
+ case x509.CACompromise:
+ return "CA Compromise"
+ case x509.AffiliationChanged:
+ return "Affiliation Changed"
+ case x509.Superseded:
+ return "Superseded"
+ case x509.CessationOfOperation:
+ return "Cessation Of Operation"
+ case x509.CertificateHold:
+ return "Certificate Hold"
+ case x509.RemoveFromCRL:
+ return "Remove From CRL"
+ case x509.PrivilegeWithdrawn:
+ return "Privilege Withdrawn"
+ case x509.AACompromise:
+ return "AA Compromise"
+ default:
+ return strconv.Itoa(int(reason))
+ }
+}
+
+// CRLToString generates a string describing the given certificate revocation list.
+// The output roughly resembles that from openssl crl -text.
+func CRLToString(crl *x509.CertificateList) string {
+ var result bytes.Buffer
+ var showCritical = func(critical bool) {
+ if critical {
+ result.WriteString(" critical")
+ }
+ result.WriteString("\n")
+ }
+ result.WriteString("Certificate Revocation List (CRL):\n")
+ result.WriteString(fmt.Sprintf(" Version: %d (%#x)\n", crl.TBSCertList.Version+1, crl.TBSCertList.Version))
+ result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", x509.SignatureAlgorithmFromAI(crl.TBSCertList.Signature)))
+ var issuer pkix.Name
+ issuer.FillFromRDNSequence(&crl.TBSCertList.Issuer)
+ result.WriteString(fmt.Sprintf(" Issuer: %v\n", NameToString(issuer)))
+ result.WriteString(fmt.Sprintf(" Last Update: %v\n", crl.TBSCertList.ThisUpdate))
+ result.WriteString(fmt.Sprintf(" Next Update: %v\n", crl.TBSCertList.NextUpdate))
+
+ if len(crl.TBSCertList.Extensions) > 0 {
+ result.WriteString(" CRL extensions:\n")
+ }
+
+ count, critical := OIDInExtensions(x509.OIDExtensionAuthorityKeyId, crl.TBSCertList.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Authority Key Identifier:")
+ showCritical(critical)
+ result.WriteString(fmt.Sprintf(" keyid:%v\n", hex.EncodeToString(crl.TBSCertList.AuthorityKeyID)))
+ }
+ count, critical = OIDInExtensions(x509.OIDExtensionIssuerAltName, crl.TBSCertList.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Issuer Alt Name:")
+ showCritical(critical)
+ result.WriteString(fmt.Sprintf(" %s\n", GeneralNamesToString(&crl.TBSCertList.IssuerAltNames)))
+ }
+ count, critical = OIDInExtensions(x509.OIDExtensionCRLNumber, crl.TBSCertList.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 CRLNumber:")
+ showCritical(critical)
+ result.WriteString(fmt.Sprintf(" %d\n", crl.TBSCertList.CRLNumber))
+ }
+ count, critical = OIDInExtensions(x509.OIDExtensionDeltaCRLIndicator, crl.TBSCertList.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Delta CRL Indicator:")
+ showCritical(critical)
+ result.WriteString(fmt.Sprintf(" %d\n", crl.TBSCertList.BaseCRLNumber))
+ }
+ count, critical = OIDInExtensions(x509.OIDExtensionIssuingDistributionPoint, crl.TBSCertList.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Issuing Distribution Point:")
+ showCritical(critical)
+ result.WriteString(fmt.Sprintf(" %s\n", GeneralNamesToString(&crl.TBSCertList.IssuingDPFullNames)))
+ }
+ count, critical = OIDInExtensions(x509.OIDExtensionFreshestCRL, crl.TBSCertList.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Freshest CRL:")
+ showCritical(critical)
+ result.WriteString(" Full Name:\n")
+ var buf bytes.Buffer
+ for _, pt := range crl.TBSCertList.FreshestCRLDistributionPoint {
+ commaAppend(&buf, "URI:"+pt)
+ }
+ result.WriteString(fmt.Sprintf(" %v\n", buf.String()))
+ }
+ count, critical = OIDInExtensions(x509.OIDExtensionAuthorityInfoAccess, crl.TBSCertList.Extensions)
+ if count > 0 {
+ result.WriteString(" Authority Information Access:")
+ showCritical(critical)
+ var issuerBuf bytes.Buffer
+ for _, issuer := range crl.TBSCertList.IssuingCertificateURL {
+ commaAppend(&issuerBuf, "URI:"+issuer)
+ }
+ if issuerBuf.Len() > 0 {
+ result.WriteString(fmt.Sprintf(" CA Issuers - %v\n", issuerBuf.String()))
+ }
+ var ocspBuf bytes.Buffer
+ for _, ocsp := range crl.TBSCertList.OCSPServer {
+ commaAppend(&ocspBuf, "URI:"+ocsp)
+ }
+ if ocspBuf.Len() > 0 {
+ result.WriteString(fmt.Sprintf(" OCSP - %v\n", ocspBuf.String()))
+ }
+ // TODO(drysdale): Display other GeneralName types
+ }
+
+ result.WriteString("\n")
+ result.WriteString("Revoked Certificates:\n")
+ for _, c := range crl.TBSCertList.RevokedCertificates {
+ result.WriteString(fmt.Sprintf(" Serial Number: %s (0x%s)\n", c.SerialNumber.Text(10), c.SerialNumber.Text(16)))
+ result.WriteString(fmt.Sprintf(" Revocation Date : %v\n", c.RevocationTime))
+ count, critical = OIDInExtensions(x509.OIDExtensionCRLReasons, c.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 CRL Reason Code:")
+ showCritical(critical)
+ result.WriteString(fmt.Sprintf(" %s\n", RevocationReasonToString(c.RevocationReason)))
+ }
+ count, critical = OIDInExtensions(x509.OIDExtensionInvalidityDate, c.Extensions)
+ if count > 0 {
+ result.WriteString(" Invalidity Date:")
+ showCritical(critical)
+ result.WriteString(fmt.Sprintf(" %s\n", c.InvalidityDate))
+ }
+ count, critical = OIDInExtensions(x509.OIDExtensionCertificateIssuer, c.Extensions)
+ if count > 0 {
+ result.WriteString(" Issuer:")
+ showCritical(critical)
+ result.WriteString(fmt.Sprintf(" %s\n", GeneralNamesToString(&c.Issuer)))
+ }
+ }
+ result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", x509.SignatureAlgorithmFromAI(crl.SignatureAlgorithm)))
+ appendHexData(&result, crl.SignatureValue.Bytes, 18, " ")
+ result.WriteString("\n")
+
+ return result.String()
+}
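A sketch of rendering a CRL with this helper; it assumes the fork's `x509.ParseCertificateList` (which accepts PEM or DER) and a hypothetical `list.crl` file:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/google/certificate-transparency-go/x509"
	"github.com/google/certificate-transparency-go/x509util"
)

func main() {
	data, err := os.ReadFile("list.crl") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	crl, err := x509.ParseCertificateList(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(x509util.CRLToString(crl))
}
```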
diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/x509util.go b/vendor/github.com/google/certificate-transparency-go/x509util/x509util.go
new file mode 100644
index 00000000000..d3c20e1aa9e
--- /dev/null
+++ b/vendor/github.com/google/certificate-transparency-go/x509util/x509util.go
@@ -0,0 +1,900 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package x509util includes utility code for working with X.509
+// certificates from the x509 package.
+package x509util
+
+import (
+ "bytes"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+
+ ct "github.com/google/certificate-transparency-go"
+ "github.com/google/certificate-transparency-go/asn1"
+ "github.com/google/certificate-transparency-go/gossip/minimal/x509ext"
+ "github.com/google/certificate-transparency-go/tls"
+ "github.com/google/certificate-transparency-go/x509"
+ "github.com/google/certificate-transparency-go/x509/pkix"
+)
+
+// OIDForStandardExtension indicates whether oid identifies a standard extension.
+// Standard extensions are listed in RFC 5280 (and other RFCs).
+func OIDForStandardExtension(oid asn1.ObjectIdentifier) bool {
+ if oid.Equal(x509.OIDExtensionSubjectKeyId) ||
+ oid.Equal(x509.OIDExtensionKeyUsage) ||
+ oid.Equal(x509.OIDExtensionExtendedKeyUsage) ||
+ oid.Equal(x509.OIDExtensionAuthorityKeyId) ||
+ oid.Equal(x509.OIDExtensionBasicConstraints) ||
+ oid.Equal(x509.OIDExtensionSubjectAltName) ||
+ oid.Equal(x509.OIDExtensionCertificatePolicies) ||
+ oid.Equal(x509.OIDExtensionNameConstraints) ||
+ oid.Equal(x509.OIDExtensionCRLDistributionPoints) ||
+ oid.Equal(x509.OIDExtensionIssuerAltName) ||
+ oid.Equal(x509.OIDExtensionSubjectDirectoryAttributes) ||
+ oid.Equal(x509.OIDExtensionInhibitAnyPolicy) ||
+ oid.Equal(x509.OIDExtensionPolicyConstraints) ||
+ oid.Equal(x509.OIDExtensionPolicyMappings) ||
+ oid.Equal(x509.OIDExtensionFreshestCRL) ||
+ oid.Equal(x509.OIDExtensionSubjectInfoAccess) ||
+ oid.Equal(x509.OIDExtensionAuthorityInfoAccess) ||
+ oid.Equal(x509.OIDExtensionIPPrefixList) ||
+ oid.Equal(x509.OIDExtensionASList) ||
+ oid.Equal(x509.OIDExtensionCTPoison) ||
+ oid.Equal(x509.OIDExtensionCTSCT) {
+ return true
+ }
+ return false
+}
+
+// OIDInExtensions checks whether the extension identified by oid is present in extensions
+// and returns how many times it occurs together with an indication of whether any of them
+// are marked critical.
+func OIDInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) (int, bool) {
+ count := 0
+ critical := false
+ for _, ext := range extensions {
+ if ext.Id.Equal(oid) {
+ count++
+ if ext.Critical {
+ critical = true
+ }
+ }
+ }
+ return count, critical
+}
+
+// String formatting for various X.509/ASN.1 types
+func bitStringToString(b asn1.BitString) string { // nolint:deadcode,unused
+ result := hex.EncodeToString(b.Bytes)
+ bitsLeft := b.BitLength % 8
+ if bitsLeft != 0 {
+ result += " (" + strconv.Itoa(8-bitsLeft) + " unused bits)"
+ }
+ return result
+}
+
+func publicKeyAlgorithmToString(algo x509.PublicKeyAlgorithm) string {
+ // Use OpenSSL-compatible strings for the algorithms.
+ switch algo {
+ case x509.RSA:
+ return "rsaEncryption"
+ case x509.DSA:
+ return "dsaEncryption"
+ case x509.ECDSA:
+ return "id-ecPublicKey"
+ default:
+ return strconv.Itoa(int(algo))
+ }
+}
+
+// appendHexData adds a hex dump of binary data to buf, with line breaks
+// after each set of count bytes, and with each new line prefixed with the
+// given prefix.
+func appendHexData(buf *bytes.Buffer, data []byte, count int, prefix string) {
+ for ii, b := range data {
+ if ii%count == 0 {
+ if ii > 0 {
+ buf.WriteString("\n")
+ }
+ buf.WriteString(prefix)
+ }
+ buf.WriteString(fmt.Sprintf("%02x:", b))
+ }
+}
+
+func curveOIDToString(oid asn1.ObjectIdentifier) (t string, bitlen int) {
+ switch {
+ case oid.Equal(x509.OIDNamedCurveP224):
+ return "secp224r1", 224
+ case oid.Equal(x509.OIDNamedCurveP256):
+ return "prime256v1", 256
+ case oid.Equal(x509.OIDNamedCurveP384):
+ return "secp384r1", 384
+ case oid.Equal(x509.OIDNamedCurveP521):
+ return "secp521r1", 521
+ case oid.Equal(x509.OIDNamedCurveP192):
+ return "secp192r1", 192
+ }
+ return fmt.Sprintf("%v", oid), -1
+}
+
+func publicKeyToString(_ x509.PublicKeyAlgorithm, pub interface{}) string {
+ var buf bytes.Buffer
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ bitlen := pub.N.BitLen()
+ buf.WriteString(fmt.Sprintf(" Public Key: (%d bit)\n", bitlen))
+ buf.WriteString(" Modulus:\n")
+ data := pub.N.Bytes()
+ appendHexData(&buf, data, 15, " ")
+ buf.WriteString("\n")
+ buf.WriteString(fmt.Sprintf(" Exponent: %d (0x%x)", pub.E, pub.E))
+ case *dsa.PublicKey:
+ buf.WriteString(" pub:\n")
+ appendHexData(&buf, pub.Y.Bytes(), 15, " ")
+ buf.WriteString("\n")
+ buf.WriteString(" P:\n")
+ appendHexData(&buf, pub.P.Bytes(), 15, " ")
+ buf.WriteString("\n")
+ buf.WriteString(" Q:\n")
+ appendHexData(&buf, pub.Q.Bytes(), 15, " ")
+ buf.WriteString("\n")
+ buf.WriteString(" G:\n")
+ appendHexData(&buf, pub.G.Bytes(), 15, " ")
+ case *ecdsa.PublicKey:
+ data := elliptic.Marshal(pub.Curve, pub.X, pub.Y)
+ oid, ok := x509.OIDFromNamedCurve(pub.Curve)
+ if !ok {
+ return " "
+ }
+ oidname, bitlen := curveOIDToString(oid)
+ buf.WriteString(fmt.Sprintf(" Public Key: (%d bit)\n", bitlen))
+ buf.WriteString(" pub:\n")
+ appendHexData(&buf, data, 15, " ")
+ buf.WriteString("\n")
+ buf.WriteString(fmt.Sprintf(" ASN1 OID: %s", oidname))
+ default:
+ buf.WriteString(fmt.Sprintf("%v", pub))
+ }
+ return buf.String()
+}
+
+func commaAppend(buf *bytes.Buffer, s string) {
+ if buf.Len() > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(s)
+}
+
+func keyUsageToString(k x509.KeyUsage) string {
+ var buf bytes.Buffer
+ if k&x509.KeyUsageDigitalSignature != 0 {
+ commaAppend(&buf, "Digital Signature")
+ }
+ if k&x509.KeyUsageContentCommitment != 0 {
+ commaAppend(&buf, "Content Commitment")
+ }
+ if k&x509.KeyUsageKeyEncipherment != 0 {
+ commaAppend(&buf, "Key Encipherment")
+ }
+ if k&x509.KeyUsageDataEncipherment != 0 {
+ commaAppend(&buf, "Data Encipherment")
+ }
+ if k&x509.KeyUsageKeyAgreement != 0 {
+ commaAppend(&buf, "Key Agreement")
+ }
+ if k&x509.KeyUsageCertSign != 0 {
+ commaAppend(&buf, "Certificate Signing")
+ }
+ if k&x509.KeyUsageCRLSign != 0 {
+ commaAppend(&buf, "CRL Signing")
+ }
+ if k&x509.KeyUsageEncipherOnly != 0 {
+ commaAppend(&buf, "Encipher Only")
+ }
+ if k&x509.KeyUsageDecipherOnly != 0 {
+ commaAppend(&buf, "Decipher Only")
+ }
+ return buf.String()
+}
+
+func extKeyUsageToString(u x509.ExtKeyUsage) string {
+ switch u {
+ case x509.ExtKeyUsageAny:
+ return "Any"
+ case x509.ExtKeyUsageServerAuth:
+ return "TLS Web server authentication"
+ case x509.ExtKeyUsageClientAuth:
+ return "TLS Web client authentication"
+ case x509.ExtKeyUsageCodeSigning:
+ return "Signing of executable code"
+ case x509.ExtKeyUsageEmailProtection:
+ return "Email protection"
+ case x509.ExtKeyUsageIPSECEndSystem:
+ return "IPSEC end system"
+ case x509.ExtKeyUsageIPSECTunnel:
+ return "IPSEC tunnel"
+ case x509.ExtKeyUsageIPSECUser:
+ return "IPSEC user"
+ case x509.ExtKeyUsageTimeStamping:
+ return "Time stamping"
+ case x509.ExtKeyUsageOCSPSigning:
+ return "OCSP signing"
+ case x509.ExtKeyUsageMicrosoftServerGatedCrypto:
+ return "Microsoft server gated cryptography"
+ case x509.ExtKeyUsageNetscapeServerGatedCrypto:
+ return "Netscape server gated cryptography"
+ case x509.ExtKeyUsageCertificateTransparency:
+ return "Certificate transparency"
+ default:
+ return "Unknown"
+ }
+}
+
+func attributeOIDToString(oid asn1.ObjectIdentifier) string { // nolint:deadcode,unused
+ switch {
+ case oid.Equal(pkix.OIDCountry):
+ return "Country"
+ case oid.Equal(pkix.OIDOrganization):
+ return "Organization"
+ case oid.Equal(pkix.OIDOrganizationalUnit):
+ return "OrganizationalUnit"
+ case oid.Equal(pkix.OIDCommonName):
+ return "CommonName"
+ case oid.Equal(pkix.OIDSerialNumber):
+ return "SerialNumber"
+ case oid.Equal(pkix.OIDLocality):
+ return "Locality"
+ case oid.Equal(pkix.OIDProvince):
+ return "Province"
+ case oid.Equal(pkix.OIDStreetAddress):
+ return "StreetAddress"
+ case oid.Equal(pkix.OIDPostalCode):
+ return "PostalCode"
+ case oid.Equal(pkix.OIDPseudonym):
+ return "Pseudonym"
+ case oid.Equal(pkix.OIDTitle):
+ return "Title"
+ case oid.Equal(pkix.OIDDnQualifier):
+ return "DnQualifier"
+ case oid.Equal(pkix.OIDName):
+ return "Name"
+ case oid.Equal(pkix.OIDSurname):
+ return "Surname"
+ case oid.Equal(pkix.OIDGivenName):
+ return "GivenName"
+ case oid.Equal(pkix.OIDInitials):
+ return "Initials"
+ case oid.Equal(pkix.OIDGenerationQualifier):
+ return "GenerationQualifier"
+ default:
+ return oid.String()
+ }
+}
+
+// NameToString creates a string description of a pkix.Name object.
+func NameToString(name pkix.Name) string {
+ var result bytes.Buffer
+ addSingle := func(prefix, item string) {
+ if len(item) == 0 {
+ return
+ }
+ commaAppend(&result, prefix)
+ result.WriteString(item)
+ }
+ addList := func(prefix string, items []string) {
+ for _, item := range items {
+ addSingle(prefix, item)
+ }
+ }
+ addList("C=", name.Country)
+ addList("O=", name.Organization)
+ addList("OU=", name.OrganizationalUnit)
+ addList("L=", name.Locality)
+ addList("ST=", name.Province)
+ addList("streetAddress=", name.StreetAddress)
+ addList("postalCode=", name.PostalCode)
+ addSingle("serialNumber=", name.SerialNumber)
+ addSingle("CN=", name.CommonName)
+ for _, atv := range name.Names {
+ value, ok := atv.Value.(string)
+ if !ok {
+ continue
+ }
+ t := atv.Type
+ // All of the defined attribute OIDs are of the form 2.5.4.N, and OIDAttribute is
+ // the 2.5.4 prefix ('id-at' in RFC 5280).
+ if len(t) == 4 && t[0] == pkix.OIDAttribute[0] && t[1] == pkix.OIDAttribute[1] && t[2] == pkix.OIDAttribute[2] {
+ // OID is 'id-at N', so check the final value to figure out which attribute.
+ switch t[3] {
+ case pkix.OIDCommonName[3], pkix.OIDSerialNumber[3], pkix.OIDCountry[3], pkix.OIDLocality[3], pkix.OIDProvince[3],
+ pkix.OIDStreetAddress[3], pkix.OIDOrganization[3], pkix.OIDOrganizationalUnit[3], pkix.OIDPostalCode[3]:
+ continue // covered by explicit fields
+ case pkix.OIDPseudonym[3]:
+ addSingle("pseudonym=", value)
+ continue
+ case pkix.OIDTitle[3]:
+ addSingle("title=", value)
+ continue
+ case pkix.OIDDnQualifier[3]:
+ addSingle("dnQualifier=", value)
+ continue
+ case pkix.OIDName[3]:
+ addSingle("name=", value)
+ continue
+ case pkix.OIDSurname[3]:
+ addSingle("surname=", value)
+ continue
+ case pkix.OIDGivenName[3]:
+ addSingle("givenName=", value)
+ continue
+ case pkix.OIDInitials[3]:
+ addSingle("initials=", value)
+ continue
+ case pkix.OIDGenerationQualifier[3]:
+ addSingle("generationQualifier=", value)
+ continue
+ }
+ }
+ addSingle(t.String()+"=", value)
+ }
+ return result.String()
+}
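+
+// For example, a Name with Country "GB", Organization "Example Ltd" and
+// CommonName "example.com" renders as "C=GB, O=Example Ltd, CN=example.com".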
+
+// OtherNameToString creates a string description of an x509.OtherName object.
+func OtherNameToString(other x509.OtherName) string {
+ return fmt.Sprintf("%v=%v", other.TypeID, hex.EncodeToString(other.Value.Bytes))
+}
+
+// GeneralNamesToString creates a string description of an x509.GeneralNames object.
+func GeneralNamesToString(gname *x509.GeneralNames) string {
+ var buf bytes.Buffer
+ for _, name := range gname.DNSNames {
+ commaAppend(&buf, "DNS:"+name)
+ }
+ for _, email := range gname.EmailAddresses {
+ commaAppend(&buf, "email:"+email)
+ }
+ for _, name := range gname.DirectoryNames {
+ commaAppend(&buf, "DirName:"+NameToString(name))
+ }
+ for _, uri := range gname.URIs {
+ commaAppend(&buf, "URI:"+uri)
+ }
+ for _, ip := range gname.IPNets {
+ if ip.Mask == nil {
+ commaAppend(&buf, "IP Address:"+ip.IP.String())
+ } else {
+ commaAppend(&buf, "IP Address:"+ip.IP.String()+"/"+ip.Mask.String())
+ }
+ }
+ for _, id := range gname.RegisteredIDs {
+ commaAppend(&buf, "Registered ID:"+id.String())
+ }
+ for _, other := range gname.OtherNames {
+ commaAppend(&buf, "othername:"+OtherNameToString(other))
+ }
+ return buf.String()
+}
+
+// CertificateToString generates a string describing the given certificate.
+// The output roughly resembles that from openssl x509 -text.
+func CertificateToString(cert *x509.Certificate) string {
+ var result bytes.Buffer
+ result.WriteString("Certificate:\n")
+ result.WriteString(" Data:\n")
+ result.WriteString(fmt.Sprintf(" Version: %d (%#x)\n", cert.Version, cert.Version-1))
+ result.WriteString(fmt.Sprintf(" Serial Number: %s (0x%s)\n", cert.SerialNumber.Text(10), cert.SerialNumber.Text(16)))
+ result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", cert.SignatureAlgorithm))
+ result.WriteString(fmt.Sprintf(" Issuer: %v\n", NameToString(cert.Issuer)))
+ result.WriteString(" Validity:\n")
+ result.WriteString(fmt.Sprintf(" Not Before: %v\n", cert.NotBefore))
+ result.WriteString(fmt.Sprintf(" Not After : %v\n", cert.NotAfter))
+ result.WriteString(fmt.Sprintf(" Subject: %v\n", NameToString(cert.Subject)))
+ result.WriteString(" Subject Public Key Info:\n")
+ result.WriteString(fmt.Sprintf(" Public Key Algorithm: %v\n", publicKeyAlgorithmToString(cert.PublicKeyAlgorithm)))
+ result.WriteString(fmt.Sprintf("%v\n", publicKeyToString(cert.PublicKeyAlgorithm, cert.PublicKey)))
+
+ if len(cert.Extensions) > 0 {
+ result.WriteString(" X509v3 extensions:\n")
+ }
+ // First display the extensions that are already cracked out
+ showAuthKeyID(&result, cert)
+ showSubjectKeyID(&result, cert)
+ showKeyUsage(&result, cert)
+ showExtendedKeyUsage(&result, cert)
+ showBasicConstraints(&result, cert)
+ showSubjectAltName(&result, cert)
+ showNameConstraints(&result, cert)
+ showCertPolicies(&result, cert)
+ showCRLDPs(&result, cert)
+ showAuthInfoAccess(&result, cert)
+ showSubjectInfoAccess(&result, cert)
+ showRPKIAddressRanges(&result, cert)
+ showRPKIASIdentifiers(&result, cert)
+ showCTPoison(&result, cert)
+ showCTSCT(&result, cert)
+ showCTLogSTHInfo(&result, cert)
+
+ showUnhandledExtensions(&result, cert)
+ showSignature(&result, cert)
+
+ return result.String()
+}
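+
+// Illustrative use: cert, err := CertificateFromPEM(pemBytes) (defined below),
+// then fmt.Print(CertificateToString(cert)) for openssl-style text output.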
+
+func showCritical(result *bytes.Buffer, critical bool) {
+ if critical {
+ result.WriteString(" critical")
+ }
+ result.WriteString("\n")
+}
+
+func showAuthKeyID(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionAuthorityKeyId, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Authority Key Identifier:")
+ showCritical(result, critical)
+ result.WriteString(fmt.Sprintf(" keyid:%v\n", hex.EncodeToString(cert.AuthorityKeyId)))
+ }
+}
+
+func showSubjectKeyID(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionSubjectKeyId, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Subject Key Identifier:")
+ showCritical(result, critical)
+ result.WriteString(fmt.Sprintf(" keyid:%v\n", hex.EncodeToString(cert.SubjectKeyId)))
+ }
+}
+
+func showKeyUsage(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionKeyUsage, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Key Usage:")
+ showCritical(result, critical)
+ result.WriteString(fmt.Sprintf(" %v\n", keyUsageToString(cert.KeyUsage)))
+ }
+}
+
+func showExtendedKeyUsage(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionExtendedKeyUsage, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Extended Key Usage:")
+ showCritical(result, critical)
+ var usages bytes.Buffer
+ for _, usage := range cert.ExtKeyUsage {
+ commaAppend(&usages, extKeyUsageToString(usage))
+ }
+ for _, oid := range cert.UnknownExtKeyUsage {
+ commaAppend(&usages, oid.String())
+ }
+ result.WriteString(fmt.Sprintf(" %v\n", usages.String()))
+ }
+}
+
+func showBasicConstraints(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionBasicConstraints, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Basic Constraints:")
+ showCritical(result, critical)
+ result.WriteString(fmt.Sprintf(" CA:%t", cert.IsCA))
+ if cert.MaxPathLen > 0 || cert.MaxPathLenZero {
+ result.WriteString(fmt.Sprintf(", pathlen:%d", cert.MaxPathLen))
+ }
+ result.WriteString("\n")
+ }
+}
+
+func showSubjectAltName(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionSubjectAltName, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Subject Alternative Name:")
+ showCritical(result, critical)
+ var buf bytes.Buffer
+ for _, name := range cert.DNSNames {
+ commaAppend(&buf, "DNS:"+name)
+ }
+ for _, email := range cert.EmailAddresses {
+ commaAppend(&buf, "email:"+email)
+ }
+ for _, ip := range cert.IPAddresses {
+ commaAppend(&buf, "IP Address:"+ip.String())
+ }
+
+ result.WriteString(fmt.Sprintf(" %v\n", buf.String()))
+ // TODO(drysdale): include other name forms
+ }
+}
+
+func showNameConstraints(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionNameConstraints, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Name Constraints:")
+ showCritical(result, critical)
+ if len(cert.PermittedDNSDomains) > 0 {
+ result.WriteString(" Permitted:\n")
+ var buf bytes.Buffer
+ for _, name := range cert.PermittedDNSDomains {
+ commaAppend(&buf, "DNS:"+name)
+ }
+ result.WriteString(fmt.Sprintf(" %v\n", buf.String()))
+ }
+ // TODO(drysdale): include other name forms
+ }
+}
+
+func showCertPolicies(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionCertificatePolicies, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 Certificate Policies:")
+ showCritical(result, critical)
+ for _, oid := range cert.PolicyIdentifiers {
+ result.WriteString(fmt.Sprintf(" Policy: %v\n", oid.String()))
+ // TODO(drysdale): Display any qualifiers associated with the policy
+ }
+ }
+}
+
+func showCRLDPs(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionCRLDistributionPoints, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" X509v3 CRL Distribution Points:")
+ showCritical(result, critical)
+ result.WriteString(" Full Name:\n")
+ var buf bytes.Buffer
+ for _, pt := range cert.CRLDistributionPoints {
+ commaAppend(&buf, "URI:"+pt)
+ }
+ result.WriteString(fmt.Sprintf(" %v\n", buf.String()))
+ // TODO(drysdale): Display other GeneralNames types, plus issuer/reasons/relative-name
+ }
+}
+
+func showAuthInfoAccess(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionAuthorityInfoAccess, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" Authority Information Access:")
+ showCritical(result, critical)
+ var issuerBuf bytes.Buffer
+ for _, issuer := range cert.IssuingCertificateURL {
+ commaAppend(&issuerBuf, "URI:"+issuer)
+ }
+ if issuerBuf.Len() > 0 {
+ result.WriteString(fmt.Sprintf(" CA Issuers - %v\n", issuerBuf.String()))
+ }
+ var ocspBuf bytes.Buffer
+ for _, ocsp := range cert.OCSPServer {
+ commaAppend(&ocspBuf, "URI:"+ocsp)
+ }
+ if ocspBuf.Len() > 0 {
+ result.WriteString(fmt.Sprintf(" OCSP - %v\n", ocspBuf.String()))
+ }
+ // TODO(drysdale): Display other GeneralNames types
+ }
+}
+
+func showSubjectInfoAccess(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionSubjectInfoAccess, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" Subject Information Access:")
+ showCritical(result, critical)
+ var tsBuf bytes.Buffer
+ for _, ts := range cert.SubjectTimestamps {
+ commaAppend(&tsBuf, "URI:"+ts)
+ }
+ if tsBuf.Len() > 0 {
+ result.WriteString(fmt.Sprintf(" AD Time Stamping - %v\n", tsBuf.String()))
+ }
+ var repoBuf bytes.Buffer
+ for _, repo := range cert.SubjectCARepositories {
+ commaAppend(&repoBuf, "URI:"+repo)
+ }
+ if repoBuf.Len() > 0 {
+ result.WriteString(fmt.Sprintf(" CA repository - %v\n", repoBuf.String()))
+ }
+ }
+}
+
+func showAddressRange(prefix x509.IPAddressPrefix, afi uint16) string {
+ switch afi {
+ case x509.IPv4AddressFamilyIndicator, x509.IPv6AddressFamilyIndicator:
+ size := 4
+ if afi == x509.IPv6AddressFamilyIndicator {
+ size = 16
+ }
+ ip := make([]byte, size)
+ copy(ip, prefix.Bytes)
+ addr := net.IPNet{IP: ip, Mask: net.CIDRMask(prefix.BitLength, 8*size)}
+ return addr.String()
+ default:
+ return fmt.Sprintf("%x/%d", prefix.Bytes, prefix.BitLength)
+ }
+}
+
+func showRPKIAddressRanges(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionIPPrefixList, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" sbgp-ipAddrBlock:")
+ showCritical(result, critical)
+ for _, blocks := range cert.RPKIAddressRanges {
+ afi := blocks.AFI
+ switch afi {
+ case x509.IPv4AddressFamilyIndicator:
+ result.WriteString(" IPv4")
+ case x509.IPv6AddressFamilyIndicator:
+ result.WriteString(" IPv6")
+ default:
+ result.WriteString(fmt.Sprintf(" %d", afi))
+ }
+ if blocks.SAFI != 0 {
+ result.WriteString(fmt.Sprintf(" SAFI=%d", blocks.SAFI))
+ }
+ result.WriteString(":")
+ if blocks.InheritFromIssuer {
+ result.WriteString(" inherit\n")
+ continue
+ }
+ result.WriteString("\n")
+ for _, prefix := range blocks.AddressPrefixes {
+ result.WriteString(fmt.Sprintf(" %s\n", showAddressRange(prefix, afi)))
+ }
+ for _, ipRange := range blocks.AddressRanges {
+ result.WriteString(fmt.Sprintf(" [%s, %s]\n", showAddressRange(ipRange.Min, afi), showAddressRange(ipRange.Max, afi)))
+ }
+ }
+ }
+}
+
+func showASIDs(result *bytes.Buffer, asids *x509.ASIdentifiers, label string) {
+ if asids == nil {
+ return
+ }
+ result.WriteString(fmt.Sprintf(" %s:\n", label))
+ if asids.InheritFromIssuer {
+ result.WriteString(" inherit\n")
+ return
+ }
+ for _, id := range asids.ASIDs {
+ result.WriteString(fmt.Sprintf(" %d\n", id))
+ }
+ for _, idRange := range asids.ASIDRanges {
+ result.WriteString(fmt.Sprintf(" %d-%d\n", idRange.Min, idRange.Max))
+ }
+}
+
+func showRPKIASIdentifiers(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionASList, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" sbgp-autonomousSysNum:")
+ showCritical(result, critical)
+ showASIDs(result, cert.RPKIASNumbers, "Autonomous System Numbers")
+ showASIDs(result, cert.RPKIRoutingDomainIDs, "Routing Domain Identifiers")
+ }
+}
+
+func showCTPoison(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionCTPoison, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" RFC6962 Pre-Certificate Poison:")
+ showCritical(result, critical)
+ result.WriteString(" .....\n")
+ }
+}
+
+func showCTSCT(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509.OIDExtensionCTSCT, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" RFC6962 Certificate Transparency SCT:")
+ showCritical(result, critical)
+ for i, sctData := range cert.SCTList.SCTList {
+ result.WriteString(fmt.Sprintf(" SCT [%d]:\n", i))
+ var sct ct.SignedCertificateTimestamp
+ _, err := tls.Unmarshal(sctData.Val, &sct)
+ if err != nil {
+ appendHexData(result, sctData.Val, 16, " ")
+ result.WriteString("\n")
+ continue
+ }
+ result.WriteString(fmt.Sprintf(" Version: %d\n", sct.SCTVersion))
+ result.WriteString(fmt.Sprintf(" LogID: %s\n", base64.StdEncoding.EncodeToString(sct.LogID.KeyID[:])))
+ result.WriteString(fmt.Sprintf(" Timestamp: %d\n", sct.Timestamp))
+ result.WriteString(fmt.Sprintf(" Signature: %s\n", sct.Signature.Algorithm))
+ result.WriteString(" Signature:\n")
+ appendHexData(result, sct.Signature.Signature, 16, " ")
+ result.WriteString("\n")
+ }
+ }
+}
+
+func showCTLogSTHInfo(result *bytes.Buffer, cert *x509.Certificate) {
+ count, critical := OIDInExtensions(x509ext.OIDExtensionCTSTH, cert.Extensions)
+ if count > 0 {
+ result.WriteString(" Certificate Transparency STH:")
+ showCritical(result, critical)
+ sthInfo, err := x509ext.LogSTHInfoFromCert(cert)
+ if err != nil {
+ result.WriteString(" Failed to decode STH:\n")
+ return
+ }
+ result.WriteString(fmt.Sprintf(" LogURL: %s\n", string(sthInfo.LogURL)))
+ result.WriteString(fmt.Sprintf(" Version: %d\n", sthInfo.Version))
+ result.WriteString(fmt.Sprintf(" TreeSize: %d\n", sthInfo.TreeSize))
+ result.WriteString(fmt.Sprintf(" Timestamp: %d\n", sthInfo.Timestamp))
+ result.WriteString(" RootHash:\n")
+ appendHexData(result, sthInfo.SHA256RootHash[:], 16, " ")
+ result.WriteString("\n")
+ result.WriteString(fmt.Sprintf(" TreeHeadSignature: %s\n", sthInfo.TreeHeadSignature.Algorithm))
+ appendHexData(result, sthInfo.TreeHeadSignature.Signature, 16, " ")
+ result.WriteString("\n")
+ }
+}
+
+func showUnhandledExtensions(result *bytes.Buffer, cert *x509.Certificate) {
+ for _, ext := range cert.Extensions {
+ // Skip extensions that are already cracked out
+ if oidAlreadyPrinted(ext.Id) {
+ continue
+ }
+ result.WriteString(fmt.Sprintf(" %v:", ext.Id))
+ showCritical(result, ext.Critical)
+ appendHexData(result, ext.Value, 16, " ")
+ result.WriteString("\n")
+ }
+}
+
+func showSignature(result *bytes.Buffer, cert *x509.Certificate) {
+ result.WriteString(fmt.Sprintf(" Signature Algorithm: %v\n", cert.SignatureAlgorithm))
+ appendHexData(result, cert.Signature, 18, " ")
+ result.WriteString("\n")
+}
+
+// TODO(drysdale): remove this once all standard OIDs are parsed and printed.
+func oidAlreadyPrinted(oid asn1.ObjectIdentifier) bool {
+ if oid.Equal(x509.OIDExtensionSubjectKeyId) ||
+ oid.Equal(x509.OIDExtensionKeyUsage) ||
+ oid.Equal(x509.OIDExtensionExtendedKeyUsage) ||
+ oid.Equal(x509.OIDExtensionAuthorityKeyId) ||
+ oid.Equal(x509.OIDExtensionBasicConstraints) ||
+ oid.Equal(x509.OIDExtensionSubjectAltName) ||
+ oid.Equal(x509.OIDExtensionCertificatePolicies) ||
+ oid.Equal(x509.OIDExtensionNameConstraints) ||
+ oid.Equal(x509.OIDExtensionCRLDistributionPoints) ||
+ oid.Equal(x509.OIDExtensionAuthorityInfoAccess) ||
+ oid.Equal(x509.OIDExtensionSubjectInfoAccess) ||
+ oid.Equal(x509.OIDExtensionIPPrefixList) ||
+ oid.Equal(x509.OIDExtensionASList) ||
+ oid.Equal(x509.OIDExtensionCTPoison) ||
+ oid.Equal(x509.OIDExtensionCTSCT) ||
+ oid.Equal(x509ext.OIDExtensionCTSTH) {
+ return true
+ }
+ return false
+}
+
+// CertificateFromPEM takes a certificate in PEM format and returns the
+// corresponding x509.Certificate object.
+func CertificateFromPEM(pemBytes []byte) (*x509.Certificate, error) {
+ block, rest := pem.Decode(pemBytes)
+ if block == nil {
+ return nil, errors.New("no PEM block found")
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("trailing data found after PEM block")
+ }
+ if block.Type != "CERTIFICATE" {
+ return nil, errors.New("PEM block is not a CERTIFICATE")
+ }
+ return x509.ParseCertificate(block.Bytes)
+}
+
+// CertificatesFromPEM parses one or more certificates from the given PEM data.
+// The PEM certificates must be concatenated. This function can be used for
+// parsing PEM-formatted certificate chains, but does not verify that the
+// resulting chain is a valid certificate chain.
+func CertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {
+ var chain []*x509.Certificate
+ for {
+ var block *pem.Block
+ block, pemBytes = pem.Decode(pemBytes)
+ if block == nil {
+ return chain, nil
+ }
+ if block.Type != "CERTIFICATE" {
+ return nil, fmt.Errorf("PEM block is not a CERTIFICATE")
+ }
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, errors.New("failed to parse certificate")
+ }
+ chain = append(chain, cert)
+ }
+}
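+
+// For example, a PEM bundle containing a leaf followed by an intermediate
+// yields a two-element slice in file order; no chain validation is performed.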
+
+// ParseSCTsFromSCTList parses each of the SCTs contained within an SCT list.
+func ParseSCTsFromSCTList(sctList *x509.SignedCertificateTimestampList) ([]*ct.SignedCertificateTimestamp, error) {
+ var scts []*ct.SignedCertificateTimestamp
+ for i, data := range sctList.SCTList {
+ sct, err := ExtractSCT(&data)
+ if err != nil {
+ return nil, fmt.Errorf("error extracting SCT number %d: %s", i, err)
+ }
+ scts = append(scts, sct)
+ }
+ return scts, nil
+}
+
+// ExtractSCT deserializes an SCT from its TLS-encoded form.
+func ExtractSCT(sctData *x509.SerializedSCT) (*ct.SignedCertificateTimestamp, error) {
+ if sctData == nil {
+ return nil, errors.New("SCT is nil")
+ }
+ var sct ct.SignedCertificateTimestamp
+ if rest, err := tls.Unmarshal(sctData.Val, &sct); err != nil {
+ return nil, fmt.Errorf("error parsing SCT: %s", err)
+ } else if len(rest) > 0 {
+ return nil, fmt.Errorf("extra data (%d bytes) after serialized SCT", len(rest))
+ }
+ return &sct, nil
+}
+
+// MarshalSCTsIntoSCTList serializes SCTs into an SCT list.
+func MarshalSCTsIntoSCTList(scts []*ct.SignedCertificateTimestamp) (*x509.SignedCertificateTimestampList, error) {
+ var sctList x509.SignedCertificateTimestampList
+ sctList.SCTList = []x509.SerializedSCT{}
+ for i, sct := range scts {
+ if sct == nil {
+ return nil, fmt.Errorf("SCT number %d is nil", i)
+ }
+ encd, err := tls.Marshal(*sct)
+ if err != nil {
+ return nil, fmt.Errorf("error serializing SCT number %d: %s", i, err)
+ }
+ sctData := x509.SerializedSCT{Val: encd}
+ sctList.SCTList = append(sctList.SCTList, sctData)
+ }
+ return &sctList, nil
+}
+
+var pemCertificatePrefix = []byte("-----BEGIN CERTIFICATE")
+
+// ParseSCTsFromCertificate parses any SCTs that are embedded in the
+// certificate provided. The certificate bytes provided can be either DER or
+// PEM, provided the PEM data starts with the PEM block marker (i.e. has no
+// leading text).
+func ParseSCTsFromCertificate(certBytes []byte) ([]*ct.SignedCertificateTimestamp, error) {
+ var cert *x509.Certificate
+ var err error
+ if bytes.HasPrefix(certBytes, pemCertificatePrefix) {
+ cert, err = CertificateFromPEM(certBytes)
+ } else {
+ cert, err = x509.ParseCertificate(certBytes)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate: %s", err)
+ }
+ return ParseSCTsFromSCTList(&cert.SCTList)
+}
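A minimal sketch of pulling embedded SCTs out of a certificate with the helpers above; the input path is an assumption:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/google/certificate-transparency-go/x509util"
)

func main() {
	certBytes, err := os.ReadFile("leaf.pem") // hypothetical path; PEM or DER
	if err != nil {
		log.Fatal(err)
	}
	scts, err := x509util.ParseSCTsFromCertificate(certBytes)
	if err != nil {
		log.Fatal(err)
	}
	for i, sct := range scts {
		fmt.Printf("SCT %d: log %x, timestamp %d\n", i, sct.LogID.KeyID, sct.Timestamp)
	}
}
```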
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
index 960c93b5f41..b62d8482691 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
@@ -27,10 +27,11 @@ import (
// docker_version and os.version are not part of the spec but included
// for backwards compatibility.
type ConfigFile struct {
- Architecture string `json:"architecture"`
- Author string `json:"author,omitempty"`
- Container string `json:"container,omitempty"`
- Created Time `json:"created,omitempty"`
+ Architecture string `json:"architecture"`
+ Author string `json:"author,omitempty"`
+ Container string `json:"container,omitempty"`
+ Created Time `json:"created,omitempty"`
+ // Deprecated: this field will be removed in the next release.
DockerVersion string `json:"docker_version,omitempty"`
History []History `json:"history,omitempty"`
OS string `json:"os"`
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
new file mode 100644
index 00000000000..364516251b9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name of Gengo, Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
new file mode 100644
index 00000000000..b8fbb2b77c4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
@@ -0,0 +1,35 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "httprule",
+ srcs = [
+ "compile.go",
+ "parse.go",
+ "types.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
+ deps = ["//utilities"],
+)
+
+go_test(
+ name = "httprule_test",
+ size = "small",
+ srcs = [
+ "compile_test.go",
+ "parse_test.go",
+ "types_test.go",
+ ],
+ embed = [":httprule"],
+ deps = [
+ "//utilities",
+ "@org_golang_google_grpc//grpclog",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":httprule",
+ visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
new file mode 100644
index 00000000000..3cd9372959d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
@@ -0,0 +1,121 @@
+package httprule
+
+import (
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+)
+
+const (
+ opcodeVersion = 1
+)
+
+// Template is a compiled representation of path templates.
+type Template struct {
+ // Version is the version number of the format.
+ Version int
+ // OpCodes is a sequence of operations.
+ OpCodes []int
+ // Pool is a constant pool
+ Pool []string
+ // Verb is a VERB part in the template.
+ Verb string
+ // Fields is a list of field paths bound in this template.
+ Fields []string
+ // Original template (example: /v1/a_bit_of_everything)
+ Template string
+}
+
+// Compiler compiles the parsed representation of a path template into
+// marshallable operations, which can be unmarshalled by runtime.NewPattern.
+type Compiler interface {
+ Compile() Template
+}
+
+type op struct {
+ // code is the opcode of the operation
+ code utilities.OpCode
+
+ // str is a string operand of the code.
+ // num is ignored if str is not empty.
+ str string
+
+ // num is a numeric operand of the code.
+ num int
+}
+
+func (w wildcard) compile() []op {
+ return []op{
+ {code: utilities.OpPush},
+ }
+}
+
+func (w deepWildcard) compile() []op {
+ return []op{
+ {code: utilities.OpPushM},
+ }
+}
+
+func (l literal) compile() []op {
+ return []op{
+ {
+ code: utilities.OpLitPush,
+ str: string(l),
+ },
+ }
+}
+
+func (v variable) compile() []op {
+ var ops []op
+ for _, s := range v.segments {
+ ops = append(ops, s.compile()...)
+ }
+ ops = append(ops, op{
+ code: utilities.OpConcatN,
+ num: len(v.segments),
+ }, op{
+ code: utilities.OpCapture,
+ str: v.path,
+ })
+
+ return ops
+}
+
+func (t template) Compile() Template {
+ var rawOps []op
+ for _, s := range t.segments {
+ rawOps = append(rawOps, s.compile()...)
+ }
+
+ var (
+ ops []int
+ pool []string
+ fields []string
+ )
+ consts := make(map[string]int)
+ for _, op := range rawOps {
+ ops = append(ops, int(op.code))
+ if op.str == "" {
+ ops = append(ops, op.num)
+ } else {
+ // eof segment literal represents the "/" path pattern
+ if op.str == eof {
+ op.str = ""
+ }
+ if _, ok := consts[op.str]; !ok {
+ consts[op.str] = len(pool)
+ pool = append(pool, op.str)
+ }
+ ops = append(ops, consts[op.str])
+ }
+ if op.code == utilities.OpCapture {
+ fields = append(fields, op.str)
+ }
+ }
+ return Template{
+ Version: opcodeVersion,
+ OpCodes: ops,
+ Pool: pool,
+ Verb: t.verb,
+ Fields: fields,
+ Template: t.template,
+ }
+}
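Since httprule is internal to grpc-gateway, a usage sketch has to live inside that module; an illustrative test-style example (not part of the vendored code), using `Parse` from parse.go below:

```go
package httprule

import "testing"

// TestCompileSketch shows Parse followed by Compile on a template that binds
// one variable and carries a custom verb. Hypothetical example, not vendored.
func TestCompileSketch(t *testing.T) {
	tmpl, err := Parse("/v1/{name=books/*}:create")
	if err != nil {
		t.Fatal(err)
	}
	c := tmpl.Compile()
	if c.Verb != "create" {
		t.Errorf("Verb = %q, want %q", c.Verb, "create")
	}
	// Fields lists the bound variables; OpCodes interleaves opcodes with
	// operands that index into the Pool of literals.
	if len(c.Fields) != 1 || c.Fields[0] != "name" {
		t.Errorf("Fields = %v, want [name]", c.Fields)
	}
}
```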
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
new file mode 100644
index 00000000000..c056bd3058a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
@@ -0,0 +1,11 @@
+//go:build gofuzz
+// +build gofuzz
+
+package httprule
+
+func Fuzz(data []byte) int {
+ if _, err := Parse(string(data)); err != nil {
+ return 0
+ }
+ // Inputs that parse successfully are more interesting to the fuzzer.
+ return 1
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
new file mode 100644
index 00000000000..65ffcf5cf87
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
@@ -0,0 +1,368 @@
+package httprule
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// InvalidTemplateError indicates that the path template is not valid.
+type InvalidTemplateError struct {
+ tmpl string
+ msg string
+}
+
+func (e InvalidTemplateError) Error() string {
+ return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
+}
+
+// Parse parses the string representation of a path template.
+func Parse(tmpl string) (Compiler, error) {
+ if !strings.HasPrefix(tmpl, "/") {
+ return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
+ }
+ tokens, verb := tokenize(tmpl[1:])
+
+ p := parser{tokens: tokens}
+ segs, err := p.topLevelSegments()
+ if err != nil {
+ return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
+ }
+
+ return template{
+ segments: segs,
+ verb: verb,
+ template: tmpl,
+ }, nil
+}
+
+func tokenize(path string) (tokens []string, verb string) {
+ if path == "" {
+ return []string{eof}, ""
+ }
+
+ const (
+ init = iota
+ field
+ nested
+ )
+ st := init
+ for path != "" {
+ var idx int
+ switch st {
+ case init:
+ idx = strings.IndexAny(path, "/{")
+ case field:
+ idx = strings.IndexAny(path, ".=}")
+ case nested:
+ idx = strings.IndexAny(path, "/}")
+ }
+ if idx < 0 {
+ tokens = append(tokens, path)
+ break
+ }
+ switch r := path[idx]; r {
+ case '/', '.':
+ case '{':
+ st = field
+ case '=':
+ st = nested
+ case '}':
+ st = init
+ }
+ if idx == 0 {
+ tokens = append(tokens, path[idx:idx+1])
+ } else {
+ tokens = append(tokens, path[:idx], path[idx:idx+1])
+ }
+ path = path[idx+1:]
+ }
+
+ l := len(tokens)
+ // See
+ // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
+ // although normal and backwards-compat logic here is to use the last index
+ // of a colon, if the final segment is a variable followed by a colon, the
+ // part following the colon must be a verb. Hence if the previous token is
+ // an end var marker, we switch the index we're looking for to Index instead
+ // of LastIndex, so that we correctly grab the remaining part of the path as
+ // the verb.
+ var penultimateTokenIsEndVar bool
+ switch l {
+ case 0, 1:
+ // Not enough to be variable so skip this logic and don't result in an
+ // invalid index
+ default:
+ penultimateTokenIsEndVar = tokens[l-2] == "}"
+ }
+ t := tokens[l-1]
+ var idx int
+ if penultimateTokenIsEndVar {
+ idx = strings.Index(t, ":")
+ } else {
+ idx = strings.LastIndex(t, ":")
+ }
+ if idx == 0 {
+ tokens, verb = tokens[:l-1], t[1:]
+ } else if idx > 0 {
+ tokens[l-1], verb = t[:idx], t[idx+1:]
+ }
+ tokens = append(tokens, eof)
+ return tokens, verb
+}
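+
+// For example, tokenize("v1/{name=books/*}:create") returns the tokens
+// ["v1", "/", "{", "name", "=", "books", "/", "*", "}", eof] and the verb
+// "create", exercising the end-of-variable case described above.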
+
+// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
+type parser struct {
+ tokens []string
+ accepted []string
+}
+
+// topLevelSegments is the target of this parser.
+func (p *parser) topLevelSegments() ([]segment, error) {
+ if _, err := p.accept(typeEOF); err == nil {
+ p.tokens = p.tokens[:0]
+ return []segment{literal(eof)}, nil
+ }
+ segs, err := p.segments()
+ if err != nil {
+ return nil, err
+ }
+ if _, err := p.accept(typeEOF); err != nil {
+ return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
+ }
+ return segs, nil
+}
+
+func (p *parser) segments() ([]segment, error) {
+ s, err := p.segment()
+ if err != nil {
+ return nil, err
+ }
+
+ segs := []segment{s}
+ for {
+ if _, err := p.accept("/"); err != nil {
+ return segs, nil
+ }
+ s, err := p.segment()
+ if err != nil {
+ return segs, err
+ }
+ segs = append(segs, s)
+ }
+}
+
+func (p *parser) segment() (segment, error) {
+ if _, err := p.accept("*"); err == nil {
+ return wildcard{}, nil
+ }
+ if _, err := p.accept("**"); err == nil {
+ return deepWildcard{}, nil
+ }
+ if l, err := p.literal(); err == nil {
+ return l, nil
+ }
+
+ v, err := p.variable()
+ if err != nil {
+ return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err)
+ }
+ return v, nil
+}
+
+func (p *parser) literal() (segment, error) {
+ lit, err := p.accept(typeLiteral)
+ if err != nil {
+ return nil, err
+ }
+ return literal(lit), nil
+}
+
+func (p *parser) variable() (segment, error) {
+ if _, err := p.accept("{"); err != nil {
+ return nil, err
+ }
+
+ path, err := p.fieldPath()
+ if err != nil {
+ return nil, err
+ }
+
+ var segs []segment
+ if _, err := p.accept("="); err == nil {
+ segs, err = p.segments()
+ if err != nil {
+ return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err)
+ }
+ } else {
+ segs = []segment{wildcard{}}
+ }
+
+ if _, err := p.accept("}"); err != nil {
+ return nil, fmt.Errorf("unterminated variable segment: %s", path)
+ }
+ return variable{
+ path: path,
+ segments: segs,
+ }, nil
+}
+
+func (p *parser) fieldPath() (string, error) {
+ c, err := p.accept(typeIdent)
+ if err != nil {
+ return "", err
+ }
+ components := []string{c}
+ for {
+ if _, err := p.accept("."); err != nil {
+ return strings.Join(components, "."), nil
+ }
+ c, err := p.accept(typeIdent)
+ if err != nil {
+ return "", fmt.Errorf("invalid field path component: %w", err)
+ }
+ components = append(components, c)
+ }
+}
+
+// A termType is a type of terminal symbols.
+type termType string
+
+// These constants define some of valid values of termType.
+// They improve readability of parse functions.
+//
+// You can also use "/", "*", "**", "." or "=" as valid values.
+const (
+ typeIdent = termType("ident")
+ typeLiteral = termType("literal")
+ typeEOF = termType("$")
+)
+
+// eof is the terminal symbol which always appears at the end of the token sequence.
+const eof = "\u0000"
+
+// accept tries to accept a token in "p".
+// This function consumes a token and returns it if it matches to the specified "term".
+// If it doesn't match, the function does not consume any tokens and returns an error.
+func (p *parser) accept(term termType) (string, error) {
+ t := p.tokens[0]
+ switch term {
+ case "/", "*", "**", ".", "=", "{", "}":
+ if t != string(term) && t != "/" {
+ return "", fmt.Errorf("expected %q but got %q", term, t)
+ }
+ case typeEOF:
+ if t != eof {
+ return "", fmt.Errorf("expected EOF but got %q", t)
+ }
+ case typeIdent:
+ if err := expectIdent(t); err != nil {
+ return "", err
+ }
+ case typeLiteral:
+ if err := expectPChars(t); err != nil {
+ return "", err
+ }
+ default:
+ return "", fmt.Errorf("unknown termType %q", term)
+ }
+ p.tokens = p.tokens[1:]
+ p.accepted = append(p.accepted, t)
+ return t, nil
+}
+
+// expectPChars determines if "t" consists of only pchars defined in RFC3986.
+//
+// https://www.ietf.org/rfc/rfc3986.txt, P.49
+//
+// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
+// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
+// / "*" / "+" / "," / ";" / "="
+// pct-encoded = "%" HEXDIG HEXDIG
+func expectPChars(t string) error {
+ const (
+ init = iota
+ pct1
+ pct2
+ )
+ st := init
+ for _, r := range t {
+ if st != init {
+ if !isHexDigit(r) {
+ return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
+ }
+ switch st {
+ case pct1:
+ st = pct2
+ case pct2:
+ st = init
+ }
+ continue
+ }
+
+ // unreserved
+ switch {
+ case 'A' <= r && r <= 'Z':
+ continue
+ case 'a' <= r && r <= 'z':
+ continue
+ case '0' <= r && r <= '9':
+ continue
+ }
+ switch r {
+ case '-', '.', '_', '~':
+ // unreserved
+ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
+ // sub-delims
+ case ':', '@':
+ // rest of pchar
+ case '%':
+ // pct-encoded
+ st = pct1
+ default:
+ return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
+ }
+ }
+ if st != init {
+ return fmt.Errorf("invalid percent-encoding in %q", t)
+ }
+ return nil
+}
+
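expectPChars above is a three-state scanner: init outside an escape, and pct1/pct2 inside a "%" escape awaiting its two hex digits. A quick in-package sketch (hypothetical, not part of the vendored file) shows the three possible outcomes:

```go
package httprule

import "fmt"

// pcharSketch is a hypothetical helper exercising expectPChars above.
func pcharSketch() {
	fmt.Println(expectPChars("a%2Fb")) // <nil>: "%2F" is a complete pct-encoded octet
	fmt.Println(expectPChars("a%2"))   // error: invalid percent-encoding in "a%2"
	fmt.Println(expectPChars("a b"))   // error: invalid character in path segment: ' '(U+0020)
}
```
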
+// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alnum:]_]*).
+func expectIdent(ident string) error {
+ if ident == "" {
+ return errors.New("empty identifier")
+ }
+ for pos, r := range ident {
+ switch {
+ case '0' <= r && r <= '9':
+ if pos == 0 {
+ return fmt.Errorf("identifier starting with digit: %s", ident)
+ }
+ continue
+ case 'A' <= r && r <= 'Z':
+ continue
+ case 'a' <= r && r <= 'z':
+ continue
+ case r == '_':
+ continue
+ default:
+ return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
+ }
+ }
+ return nil
+}
+
+func isHexDigit(r rune) bool {
+ switch {
+ case '0' <= r && r <= '9':
+ return true
+ case 'A' <= r && r <= 'F':
+ return true
+ case 'a' <= r && r <= 'f':
+ return true
+ }
+ return false
+}
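Taken together, topLevelSegments, segment, and accept form a small recursive-descent parser over a pre-tokenized path template. A hypothetical in-package test (not part of the vendored file; the package's real tokenize helper lives elsewhere in parse.go, so the token slice below is written out by hand, trailing eof terminal included) illustrates what the grammar produces:

```go
package httprule

import "testing"

// TestParseSketch is a hypothetical sketch, not part of the vendored file.
func TestParseSketch(t *testing.T) {
	// Hand-tokenized form of "v1/{name=messages/*}", ending with the eof terminal.
	p := parser{tokens: []string{"v1", "/", "{", "name", "=", "messages", "/", "*", "}", eof}}
	segs, err := p.topLevelSegments()
	if err != nil {
		t.Fatal(err)
	}
	for _, s := range segs {
		t.Log(s.String()) // "v1", then "{name=messages/*}"
	}
}
```
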
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
new file mode 100644
index 00000000000..5a814a0004c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
@@ -0,0 +1,60 @@
+package httprule
+
+import (
+ "fmt"
+ "strings"
+)
+
+type template struct {
+ segments []segment
+ verb string
+ template string
+}
+
+type segment interface {
+ fmt.Stringer
+ compile() (ops []op)
+}
+
+type wildcard struct{}
+
+type deepWildcard struct{}
+
+type literal string
+
+type variable struct {
+ path string
+ segments []segment
+}
+
+func (wildcard) String() string {
+ return "*"
+}
+
+func (deepWildcard) String() string {
+ return "**"
+}
+
+func (l literal) String() string {
+ return string(l)
+}
+
+func (v variable) String() string {
+ var segs []string
+ for _, s := range v.segments {
+ segs = append(segs, s.String())
+ }
+ return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
+}
+
+func (t template) String() string {
+ var segs []string
+ for _, s := range t.segments {
+ segs = append(segs, s.String())
+ }
+ str := strings.Join(segs, "/")
+ if t.verb != "" {
+ str = fmt.Sprintf("%s:%s", str, t.verb)
+ }
+ return "/" + str
+}
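The String methods invert parsing: they reassemble a segment tree into its textual template form, appending the verb suffix when one is set. A hypothetical in-package sketch (renderSketch is not part of the vendored file):

```go
package httprule

import "fmt"

// renderSketch is a hypothetical helper showing how the String methods
// above reassemble a parsed template, verb suffix included.
func renderSketch() {
	tmpl := template{
		verb: "import",
		segments: []segment{
			literal("v1"),
			variable{path: "parent", segments: []segment{literal("shelves"), wildcard{}}},
			literal("books"),
		},
	}
	fmt.Println(tmpl.String()) // /v1/{parent=shelves/*}/books:import
}
```
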
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel
new file mode 100644
index 00000000000..d71991e6e8d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel
@@ -0,0 +1,44 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+ name = "options_proto_files",
+ srcs = [
+ "annotations.proto",
+ "openapiv2.proto",
+ ],
+)
+
+go_library(
+ name = "options",
+ embed = [":options_go_proto"],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options",
+)
+
+proto_library(
+ name = "options_proto",
+ srcs = [
+ "annotations.proto",
+ "openapiv2.proto",
+ ],
+ deps = [
+ "@com_google_protobuf//:descriptor_proto",
+ "@com_google_protobuf//:struct_proto",
+ ],
+)
+
+go_proto_library(
+ name = "options_go_proto",
+ compilers = ["//:go_apiv2"],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options",
+ proto = ":options_proto",
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":options",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go
new file mode 100644
index 00000000000..738c9754a61
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go
@@ -0,0 +1,269 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.0
+// protoc (unknown)
+// source: protoc-gen-openapiv2/options/annotations.proto
+
+//go:build !protoopaque
+
+package options
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*Swagger)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger",
+ Tag: "bytes,1042,opt,name=openapiv2_swagger",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*Operation)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation",
+ Tag: "bytes,1042,opt,name=openapiv2_operation",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*Schema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema",
+ Tag: "bytes,1042,opt,name=openapiv2_schema",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumOptions)(nil),
+ ExtensionType: (*EnumSchema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum",
+ Tag: "bytes,1042,opt,name=openapiv2_enum",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*Tag)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag",
+ Tag: "bytes,1042,opt,name=openapiv2_tag",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*JSONSchema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field",
+ Tag: "bytes,1042,opt,name=openapiv2_field",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042;
+ E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042;
+ E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042;
+ E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042;
+ E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042;
+ E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042;
+ E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5]
+)
+
+var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{
+ 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72,
+ 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75,
+ 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74,
+ 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+ 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73,
+ 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e,
+ 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{
+ (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
+ (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
+ (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+ (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions
+ (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions
+ (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions
+ (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+ (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema
+ (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag
+ (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+}
+var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{
+ 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions
+ 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions
+ 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions
+ 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions
+ 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions
+ 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions
+ 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation
+ 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+ 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+ 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 6, // [6:12] is the sub-list for extension type_name
+ 0, // [0:6] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() }
+func file_protoc_gen_openapiv2_options_annotations_proto_init() {
+ if File_protoc_gen_openapiv2_options_annotations_proto != nil {
+ return
+ }
+ file_protoc_gen_openapiv2_options_openapiv2_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 6,
+ NumServices: 0,
+ },
+ GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes,
+ DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs,
+ ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes,
+ }.Build()
+ File_protoc_gen_openapiv2_options_annotations_proto = out.File
+ file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil
+ file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil
+ file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil
+}
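The E_Openapiv2* values above are ordinary protobuf extension descriptors, so they are written and read with the google.golang.org/protobuf/proto extension API. A minimal sketch, assuming the Operation message defined in openapiv2.pb.go in this same package:

```go
package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Attach an (empty) Operation extension to a MethodOptions, then read it back.
	opts := &descriptorpb.MethodOptions{}
	proto.SetExtension(opts, options.E_Openapiv2Operation, &options.Operation{})
	if proto.HasExtension(opts, options.E_Openapiv2Operation) {
		op := proto.GetExtension(opts, options.E_Openapiv2Operation).(*options.Operation)
		fmt.Println(op) // in practice opts would come from a method descriptor's Options()
	}
}
```
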
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto
new file mode 100644
index 00000000000..aecc5e709c3
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto
@@ -0,0 +1,51 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_openapiv2.options;
+
+import "google/protobuf/descriptor.proto";
+import "protoc-gen-openapiv2/options/openapiv2.proto";
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options";
+
+extend google.protobuf.FileOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ Swagger openapiv2_swagger = 1042;
+}
+extend google.protobuf.MethodOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ Operation openapiv2_operation = 1042;
+}
+extend google.protobuf.MessageOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ Schema openapiv2_schema = 1042;
+}
+extend google.protobuf.EnumOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ EnumSchema openapiv2_enum = 1042;
+}
+extend google.protobuf.ServiceOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ Tag openapiv2_tag = 1042;
+}
+extend google.protobuf.FieldOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ JSONSchema openapiv2_field = 1042;
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go
new file mode 100644
index 00000000000..b570167836d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go
@@ -0,0 +1,269 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.0
+// protoc (unknown)
+// source: protoc-gen-openapiv2/options/annotations.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*Swagger)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger",
+ Tag: "bytes,1042,opt,name=openapiv2_swagger",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*Operation)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation",
+ Tag: "bytes,1042,opt,name=openapiv2_operation",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*Schema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema",
+ Tag: "bytes,1042,opt,name=openapiv2_schema",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumOptions)(nil),
+ ExtensionType: (*EnumSchema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum",
+ Tag: "bytes,1042,opt,name=openapiv2_enum",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*Tag)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag",
+ Tag: "bytes,1042,opt,name=openapiv2_tag",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*JSONSchema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field",
+ Tag: "bytes,1042,opt,name=openapiv2_field",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042;
+ E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042;
+ E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042;
+ E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042;
+ E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042;
+ E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042;
+ E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5]
+)
+
+var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{
+ 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72,
+ 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75,
+ 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74,
+ 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+ 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73,
+ 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e,
+ 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{
+ (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
+ (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
+ (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+ (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions
+ (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions
+ (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions
+ (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+ (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema
+ (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag
+ (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+}
+var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{
+ 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions
+ 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions
+ 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions
+ 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions
+ 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions
+ 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions
+ 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation
+ 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+ 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+ 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 6, // [6:12] is the sub-list for extension type_name
+ 0, // [0:6] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() }
+func file_protoc_gen_openapiv2_options_annotations_proto_init() {
+ if File_protoc_gen_openapiv2_options_annotations_proto != nil {
+ return
+ }
+ file_protoc_gen_openapiv2_options_openapiv2_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 6,
+ NumServices: 0,
+ },
+ GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes,
+ DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs,
+ ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes,
+ }.Build()
+ File_protoc_gen_openapiv2_options_annotations_proto = out.File
+ file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil
+ file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil
+ file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml
new file mode 100644
index 00000000000..07dfb958f1e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml
@@ -0,0 +1,7 @@
+version: v2
+plugins:
+ - remote: buf.build/protocolbuffers/go:v1.36.0
+ out: .
+ opt:
+ - paths=source_relative
+ - default_api_level=API_HYBRID
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go
new file mode 100644
index 00000000000..3a34e664e0a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go
@@ -0,0 +1,4263 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.0
+// protoc (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build !protoopaque
+
+package options
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+type Scheme int32
+
+const (
+ Scheme_UNKNOWN Scheme = 0
+ Scheme_HTTP Scheme = 1
+ Scheme_HTTPS Scheme = 2
+ Scheme_WS Scheme = 3
+ Scheme_WSS Scheme = 4
+)
+
+// Enum value maps for Scheme.
+var (
+ Scheme_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "HTTP",
+ 2: "HTTPS",
+ 3: "WS",
+ 4: "WSS",
+ }
+ Scheme_value = map[string]int32{
+ "UNKNOWN": 0,
+ "HTTP": 1,
+ "HTTPS": 2,
+ "WS": 3,
+ "WSS": 4,
+ }
+)
+
+func (x Scheme) Enum() *Scheme {
+ p := new(Scheme)
+ *p = x
+ return p
+}
+
+func (x Scheme) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Scheme) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor()
+}
+
+func (Scheme) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0]
+}
+
+func (x Scheme) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// `Type` is a supported HTTP header type.
+// See https://swagger.io/specification/v2/#parameterType.
+type HeaderParameter_Type int32
+
+const (
+ HeaderParameter_UNKNOWN HeaderParameter_Type = 0
+ HeaderParameter_STRING HeaderParameter_Type = 1
+ HeaderParameter_NUMBER HeaderParameter_Type = 2
+ HeaderParameter_INTEGER HeaderParameter_Type = 3
+ HeaderParameter_BOOLEAN HeaderParameter_Type = 4
+)
+
+// Enum value maps for HeaderParameter_Type.
+var (
+ HeaderParameter_Type_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "STRING",
+ 2: "NUMBER",
+ 3: "INTEGER",
+ 4: "BOOLEAN",
+ }
+ HeaderParameter_Type_value = map[string]int32{
+ "UNKNOWN": 0,
+ "STRING": 1,
+ "NUMBER": 2,
+ "INTEGER": 3,
+ "BOOLEAN": 4,
+ }
+)
+
+func (x HeaderParameter_Type) Enum() *HeaderParameter_Type {
+ p := new(HeaderParameter_Type)
+ *p = x
+ return p
+}
+
+func (x HeaderParameter_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor()
+}
+
+func (HeaderParameter_Type) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1]
+}
+
+func (x HeaderParameter_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+type JSONSchema_JSONSchemaSimpleTypes int32
+
+const (
+ JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0
+ JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1
+ JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2
+ JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3
+ JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4
+ JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5
+ JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6
+ JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7
+)
+
+// Enum value maps for JSONSchema_JSONSchemaSimpleTypes.
+var (
+ JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "ARRAY",
+ 2: "BOOLEAN",
+ 3: "INTEGER",
+ 4: "NULL",
+ 5: "NUMBER",
+ 6: "OBJECT",
+ 7: "STRING",
+ }
+ JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{
+ "UNKNOWN": 0,
+ "ARRAY": 1,
+ "BOOLEAN": 2,
+ "INTEGER": 3,
+ "NULL": 4,
+ "NUMBER": 5,
+ "OBJECT": 6,
+ "STRING": 7,
+ }
+)
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes {
+ p := new(JSONSchema_JSONSchemaSimpleTypes)
+ *p = x
+ return p
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor()
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2]
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The type of the security scheme. Valid values are "basic",
+// "apiKey" or "oauth2".
+type SecurityScheme_Type int32
+
+const (
+ SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0
+ SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1
+ SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2
+ SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3
+)
+
+// Enum value maps for SecurityScheme_Type.
+var (
+ SecurityScheme_Type_name = map[int32]string{
+ 0: "TYPE_INVALID",
+ 1: "TYPE_BASIC",
+ 2: "TYPE_API_KEY",
+ 3: "TYPE_OAUTH2",
+ }
+ SecurityScheme_Type_value = map[string]int32{
+ "TYPE_INVALID": 0,
+ "TYPE_BASIC": 1,
+ "TYPE_API_KEY": 2,
+ "TYPE_OAUTH2": 3,
+ }
+)
+
+func (x SecurityScheme_Type) Enum() *SecurityScheme_Type {
+ p := new(SecurityScheme_Type)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor()
+}
+
+func (SecurityScheme_Type) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3]
+}
+
+func (x SecurityScheme_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The location of the API key. Valid values are "query" or "header".
+type SecurityScheme_In int32
+
+const (
+ SecurityScheme_IN_INVALID SecurityScheme_In = 0
+ SecurityScheme_IN_QUERY SecurityScheme_In = 1
+ SecurityScheme_IN_HEADER SecurityScheme_In = 2
+)
+
+// Enum value maps for SecurityScheme_In.
+var (
+ SecurityScheme_In_name = map[int32]string{
+ 0: "IN_INVALID",
+ 1: "IN_QUERY",
+ 2: "IN_HEADER",
+ }
+ SecurityScheme_In_value = map[string]int32{
+ "IN_INVALID": 0,
+ "IN_QUERY": 1,
+ "IN_HEADER": 2,
+ }
+)
+
+func (x SecurityScheme_In) Enum() *SecurityScheme_In {
+ p := new(SecurityScheme_In)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_In) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor()
+}
+
+func (SecurityScheme_In) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4]
+}
+
+func (x SecurityScheme_In) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The flow used by the OAuth2 security scheme. Valid values are
+// "implicit", "password", "application" or "accessCode".
+type SecurityScheme_Flow int32
+
+const (
+ SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0
+ SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1
+ SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2
+ SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3
+ SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4
+)
+
+// Enum value maps for SecurityScheme_Flow.
+var (
+ SecurityScheme_Flow_name = map[int32]string{
+ 0: "FLOW_INVALID",
+ 1: "FLOW_IMPLICIT",
+ 2: "FLOW_PASSWORD",
+ 3: "FLOW_APPLICATION",
+ 4: "FLOW_ACCESS_CODE",
+ }
+ SecurityScheme_Flow_value = map[string]int32{
+ "FLOW_INVALID": 0,
+ "FLOW_IMPLICIT": 1,
+ "FLOW_PASSWORD": 2,
+ "FLOW_APPLICATION": 3,
+ "FLOW_ACCESS_CODE": 4,
+ }
+)
+
+func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow {
+ p := new(SecurityScheme_Flow)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_Flow) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor()
+}
+
+func (SecurityScheme_Flow) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5]
+}
+
+func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// schemes: HTTPS;
+// consumes: "application/json";
+// produces: "application/json";
+// };
+type Swagger struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // Specifies the OpenAPI Specification version being used. It can be
+ // used by the OpenAPI UI and other clients to interpret the API listing. The
+ // value MUST be "2.0".
+ Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+ // Provides metadata about the API. The metadata can be used by the
+ // clients if needed.
+ Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+ // The host (name or ip) serving the API. This MUST be the host only and does
+ // not include the scheme nor sub-paths. It MAY include a port. If the host is
+ // not included, the host serving the documentation is to be used (including
+ // the port). The host does not support path templating.
+ Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+ // The base path on which the API is served, which is relative to the host. If
+ // it is not included, the API is served directly under the host. The value
+ // MUST start with a leading slash (/). The basePath does not support path
+ // templating.
+ // Note that using `base_path` does not change the endpoint paths that are
+ // generated in the resulting OpenAPI file. If you wish to use `base_path`
+ // with relatively generated OpenAPI paths, the `base_path` prefix must be
+ // manually removed from your `google.api.http` paths and your code changed to
+ // serve the API from the `base_path`.
+ BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+ // The transfer protocol of the API. Values MUST be from the list: "http",
+ // "https", "ws", "wss". If schemes is not included, the default scheme to
+ // be used is the one used to access the OpenAPI definition itself.
+ Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+ // A list of MIME types the APIs can consume. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+ // A list of MIME types the APIs can produce. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ // An object to hold responses that can be used across operations. This
+ // property does not define global responses for all operations.
+ Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // Security scheme definitions that can be used across the specification.
+ SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+ // A declaration of which security schemes are applied for the API as a whole.
+ // The list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements).
+ // Individual operations can override this definition.
+ Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"`
+ // Additional external documentation.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Swagger) Reset() {
+ *x = Swagger{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Swagger) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Swagger) ProtoMessage() {}
+
+func (x *Swagger) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Swagger) GetSwagger() string {
+ if x != nil {
+ return x.Swagger
+ }
+ return ""
+}
+
+func (x *Swagger) GetInfo() *Info {
+ if x != nil {
+ return x.Info
+ }
+ return nil
+}
+
+func (x *Swagger) GetHost() string {
+ if x != nil {
+ return x.Host
+ }
+ return ""
+}
+
+func (x *Swagger) GetBasePath() string {
+ if x != nil {
+ return x.BasePath
+ }
+ return ""
+}
+
+func (x *Swagger) GetSchemes() []Scheme {
+ if x != nil {
+ return x.Schemes
+ }
+ return nil
+}
+
+func (x *Swagger) GetConsumes() []string {
+ if x != nil {
+ return x.Consumes
+ }
+ return nil
+}
+
+func (x *Swagger) GetProduces() []string {
+ if x != nil {
+ return x.Produces
+ }
+ return nil
+}
+
+func (x *Swagger) GetResponses() map[string]*Response {
+ if x != nil {
+ return x.Responses
+ }
+ return nil
+}
+
+func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions {
+ if x != nil {
+ return x.SecurityDefinitions
+ }
+ return nil
+}
+
+func (x *Swagger) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ return x.Security
+ }
+ return nil
+}
+
+func (x *Swagger) GetTags() []*Tag {
+ if x != nil {
+ return x.Tags
+ }
+ return nil
+}
+
+func (x *Swagger) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *Swagger) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Swagger) SetSwagger(v string) {
+ x.Swagger = v
+}
+
+func (x *Swagger) SetInfo(v *Info) {
+ x.Info = v
+}
+
+func (x *Swagger) SetHost(v string) {
+ x.Host = v
+}
+
+func (x *Swagger) SetBasePath(v string) {
+ x.BasePath = v
+}
+
+func (x *Swagger) SetSchemes(v []Scheme) {
+ x.Schemes = v
+}
+
+func (x *Swagger) SetConsumes(v []string) {
+ x.Consumes = v
+}
+
+func (x *Swagger) SetProduces(v []string) {
+ x.Produces = v
+}
+
+func (x *Swagger) SetResponses(v map[string]*Response) {
+ x.Responses = v
+}
+
+func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) {
+ x.SecurityDefinitions = v
+}
+
+func (x *Swagger) SetSecurity(v []*SecurityRequirement) {
+ x.Security = v
+}
+
+func (x *Swagger) SetTags(v []*Tag) {
+ x.Tags = v
+}
+
+func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *Swagger) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Swagger) HasInfo() bool {
+ if x == nil {
+ return false
+ }
+ return x.Info != nil
+}
+
+func (x *Swagger) HasSecurityDefinitions() bool {
+ if x == nil {
+ return false
+ }
+ return x.SecurityDefinitions != nil
+}
+
+func (x *Swagger) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *Swagger) ClearInfo() {
+ x.Info = nil
+}
+
+func (x *Swagger) ClearSecurityDefinitions() {
+ x.SecurityDefinitions = nil
+}
+
+func (x *Swagger) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
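+
+// Accessor semantics sketch (illustrative; values are hypothetical): the
+// generated getters and Has* methods are safe on a nil receiver, while the
+// Set* and Clear* methods require a non-nil message.
+//
+//	var s *Swagger
+//	_ = s.GetHost() // "" (nil-safe)
+//	_ = s.HasInfo() // false (nil-safe)
+//	s = &Swagger{}
+//	s.SetHost("api.example.com")
+//	s.ClearInfo()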
+
+type Swagger_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Specifies the OpenAPI Specification version being used. It can be
+ // used by the OpenAPI UI and other clients to interpret the API listing. The
+ // value MUST be "2.0".
+ Swagger string
+ // Provides metadata about the API. The metadata can be used by the
+ // clients if needed.
+ Info *Info
+ // The host (name or IP) serving the API. This MUST be the host only and does
+ // not include the scheme or sub-paths. It MAY include a port. If the host is
+ // not included, the host serving the documentation is to be used (including
+ // the port). The host does not support path templating.
+ Host string
+ // The base path on which the API is served, which is relative to the host. If
+ // it is not included, the API is served directly under the host. The value
+ // MUST start with a leading slash (/). The basePath does not support path
+ // templating.
+ // Note that using `base_path` does not change the endpoint paths that are
+ // generated in the resulting OpenAPI file. If you wish to use `base_path`
+ // with relatively generated OpenAPI paths, the `base_path` prefix must be
+ // manually removed from your `google.api.http` paths and your code changed to
+ // serve the API from the `base_path`.
+ BasePath string
+ // The transfer protocol of the API. Values MUST be from the list: "http",
+ // "https", "ws", "wss". If the schemes is not included, the default scheme to
+ // be used is the one used to access the OpenAPI definition itself.
+ Schemes []Scheme
+ // A list of MIME types the APIs can consume. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Consumes []string
+ // A list of MIME types the APIs can produce. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Produces []string
+ // An object to hold responses that can be used across operations. This
+ // property does not define global responses for all operations.
+ Responses map[string]*Response
+ // Security scheme definitions that can be used across the specification.
+ SecurityDefinitions *SecurityDefinitions
+ // A declaration of which security schemes are applied for the API as a whole.
+ // The list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements).
+ // Individual operations can override this definition.
+ Security []*SecurityRequirement
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []*Tag
+ // Additional external documentation.
+ ExternalDocs *ExternalDocumentation
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Swagger_builder) Build() *Swagger {
+ m0 := &Swagger{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Swagger = b.Swagger
+ x.Info = b.Info
+ x.Host = b.Host
+ x.BasePath = b.BasePath
+ x.Schemes = b.Schemes
+ x.Consumes = b.Consumes
+ x.Produces = b.Produces
+ x.Responses = b.Responses
+ x.SecurityDefinitions = b.SecurityDefinitions
+ x.Security = b.Security
+ x.Tags = b.Tags
+ x.ExternalDocs = b.ExternalDocs
+ x.Extensions = b.Extensions
+ return m0
+}
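+
+// Builder usage sketch (illustrative; field values are hypothetical, and
+// Scheme_HTTPS is assumed from the Scheme enum generated earlier in this
+// file):
+//
+//	sw := Swagger_builder{
+//		Swagger:  "2.0",
+//		Host:     "api.example.com",
+//		BasePath: "/v1",
+//		Schemes:  []Scheme{Scheme_HTTPS},
+//		Extensions: map[string]*structpb.Value{
+//			"x-foo": structpb.NewStringValue("bar"),
+//		},
+//	}.Build()
+//	_ = sw.GetBasePath() // "/v1"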
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+// service EchoService {
+// rpc Echo(SimpleMessage) returns (SimpleMessage) {
+// option (google.api.http) = {
+// get: "/v1/example/echo/{id}"
+// };
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+// summary: "Get a message.";
+// operation_id: "getMessage";
+// tags: "echo";
+// responses: {
+// key: "200"
+// value: {
+// description: "OK";
+// }
+// }
+// };
+// }
+// }
+type Operation struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+ // A short summary of what the operation does. For maximum readability in the
+ // swagger-ui, this field SHOULD be less than 120 characters.
+ Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+ // A verbose explanation of the operation behavior. GFM syntax can be used for
+ // rich text representation.
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ // Additional external documentation for this operation.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ // Unique string used to identify the operation. The id MUST be unique among
+ // all operations described in the API. Tools and libraries MAY use the
+ // operationId to uniquely identify an operation; therefore, it is recommended
+ // to follow common programming naming conventions.
+ OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+ // A list of MIME types the operation can consume. This overrides the consumes
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+ // A list of MIME types the operation can produce. This overrides the produces
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ // The list of possible responses as they are returned from executing this
+ // operation.
+ Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // The transfer protocol for the operation. Values MUST be from the list:
+ // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+ // schemes definition.
+ Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+ // Declares this operation to be deprecated. Usage of the declared operation
+ // should be avoided. Default value is false.
+ Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+ // A declaration of which security schemes are applied for this operation. The
+ // list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements). This
+ // definition overrides any declared top-level security. To remove a top-level
+ // security declaration, an empty array can be used.
+ Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // Custom parameters such as HTTP request headers.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+ // and https://swagger.io/specification/v2/#parameter-object.
+ Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Operation) Reset() {
+ *x = Operation{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Operation) GetTags() []string {
+ if x != nil {
+ return x.Tags
+ }
+ return nil
+}
+
+func (x *Operation) GetSummary() string {
+ if x != nil {
+ return x.Summary
+ }
+ return ""
+}
+
+func (x *Operation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Operation) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *Operation) GetOperationId() string {
+ if x != nil {
+ return x.OperationId
+ }
+ return ""
+}
+
+func (x *Operation) GetConsumes() []string {
+ if x != nil {
+ return x.Consumes
+ }
+ return nil
+}
+
+func (x *Operation) GetProduces() []string {
+ if x != nil {
+ return x.Produces
+ }
+ return nil
+}
+
+func (x *Operation) GetResponses() map[string]*Response {
+ if x != nil {
+ return x.Responses
+ }
+ return nil
+}
+
+func (x *Operation) GetSchemes() []Scheme {
+ if x != nil {
+ return x.Schemes
+ }
+ return nil
+}
+
+func (x *Operation) GetDeprecated() bool {
+ if x != nil {
+ return x.Deprecated
+ }
+ return false
+}
+
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ return x.Security
+ }
+ return nil
+}
+
+func (x *Operation) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Operation) GetParameters() *Parameters {
+ if x != nil {
+ return x.Parameters
+ }
+ return nil
+}
+
+func (x *Operation) SetTags(v []string) {
+ x.Tags = v
+}
+
+func (x *Operation) SetSummary(v string) {
+ x.Summary = v
+}
+
+func (x *Operation) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Operation) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *Operation) SetOperationId(v string) {
+ x.OperationId = v
+}
+
+func (x *Operation) SetConsumes(v []string) {
+ x.Consumes = v
+}
+
+func (x *Operation) SetProduces(v []string) {
+ x.Produces = v
+}
+
+func (x *Operation) SetResponses(v map[string]*Response) {
+ x.Responses = v
+}
+
+func (x *Operation) SetSchemes(v []Scheme) {
+ x.Schemes = v
+}
+
+func (x *Operation) SetDeprecated(v bool) {
+ x.Deprecated = v
+}
+
+func (x *Operation) SetSecurity(v []*SecurityRequirement) {
+ x.Security = v
+}
+
+func (x *Operation) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Operation) SetParameters(v *Parameters) {
+ x.Parameters = v
+}
+
+func (x *Operation) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *Operation) HasParameters() bool {
+ if x == nil {
+ return false
+ }
+ return x.Parameters != nil
+}
+
+func (x *Operation) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+func (x *Operation) ClearParameters() {
+ x.Parameters = nil
+}
+
+type Operation_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []string
+ // A short summary of what the operation does. For maximum readability in the
+ // swagger-ui, this field SHOULD be less than 120 characters.
+ Summary string
+ // A verbose explanation of the operation behavior. GFM syntax can be used for
+ // rich text representation.
+ Description string
+ // Additional external documentation for this operation.
+ ExternalDocs *ExternalDocumentation
+ // Unique string used to identify the operation. The id MUST be unique among
+ // all operations described in the API. Tools and libraries MAY use the
+ // operationId to uniquely identify an operation; therefore, it is recommended
+ // to follow common programming naming conventions.
+ OperationId string
+ // A list of MIME types the operation can consume. This overrides the consumes
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Consumes []string
+ // A list of MIME types the operation can produce. This overrides the produces
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Produces []string
+ // The list of possible responses as they are returned from executing this
+ // operation.
+ Responses map[string]*Response
+ // The transfer protocol for the operation. Values MUST be from the list:
+ // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+ // schemes definition.
+ Schemes []Scheme
+ // Declares this operation to be deprecated. Usage of the declared operation
+ // should be avoided. Default value is false.
+ Deprecated bool
+ // A declaration of which security schemes are applied for this operation. The
+ // list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements). This
+ // definition overrides any declared top-level security. To remove a top-level
+ // security declaration, an empty array can be used.
+ Security []*SecurityRequirement
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+ // Custom parameters such as HTTP request headers.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+ // and https://swagger.io/specification/v2/#parameter-object.
+ Parameters *Parameters
+}
+
+func (b0 Operation_builder) Build() *Operation {
+ m0 := &Operation{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Tags = b.Tags
+ x.Summary = b.Summary
+ x.Description = b.Description
+ x.ExternalDocs = b.ExternalDocs
+ x.OperationId = b.OperationId
+ x.Consumes = b.Consumes
+ x.Produces = b.Produces
+ x.Responses = b.Responses
+ x.Schemes = b.Schemes
+ x.Deprecated = b.Deprecated
+ x.Security = b.Security
+ x.Extensions = b.Extensions
+ x.Parameters = b.Parameters
+ return m0
+}
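+
+// Builder usage sketch (illustrative; values are hypothetical), mirroring the
+// annotation example in the `Operation` comment above:
+//
+//	op := Operation_builder{
+//		OperationId: "getMessage",
+//		Summary:     "Get a message.",
+//		Tags:        []string{"echo"},
+//		Responses: map[string]*Response{
+//			"200": Response_builder{Description: "OK"}.Build(),
+//		},
+//	}.Build()
+//	_ = op.GetDeprecated() // false (zero value)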
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only
+// allow header parameters to be set here since we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+type Parameters struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // `Headers` is one or more HTTP header parameter.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+ Headers []*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Parameters) Reset() {
+ *x = Parameters{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Parameters) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Parameters) ProtoMessage() {}
+
+func (x *Parameters) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Parameters) GetHeaders() []*HeaderParameter {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *Parameters) SetHeaders(v []*HeaderParameter) {
+ x.Headers = v
+}
+
+type Parameters_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Headers` is one or more HTTP header parameter.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+ Headers []*HeaderParameter
+}
+
+func (b0 Parameters_builder) Build() *Parameters {
+ m0 := &Parameters{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Headers = b.Headers
+ return m0
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+type HeaderParameter struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // `Name` is the header name.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // `Description` is a short description of the header.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ // See: https://swagger.io/specification/v2/#parameterType.
+ Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"`
+ // `Format` is the extending format for the previously mentioned type.
+ Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"`
+ // `Required` indicates whether the header is required.
+ Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HeaderParameter) Reset() {
+ *x = HeaderParameter{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HeaderParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameter) ProtoMessage() {}
+
+func (x *HeaderParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *HeaderParameter) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetType() HeaderParameter_Type {
+ if x != nil {
+ return x.Type
+ }
+ return HeaderParameter_UNKNOWN
+}
+
+func (x *HeaderParameter) GetFormat() string {
+ if x != nil {
+ return x.Format
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetRequired() bool {
+ if x != nil {
+ return x.Required
+ }
+ return false
+}
+
+func (x *HeaderParameter) SetName(v string) {
+ x.Name = v
+}
+
+func (x *HeaderParameter) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *HeaderParameter) SetType(v HeaderParameter_Type) {
+ x.Type = v
+}
+
+func (x *HeaderParameter) SetFormat(v string) {
+ x.Format = v
+}
+
+func (x *HeaderParameter) SetRequired(v bool) {
+ x.Required = v
+}
+
+type HeaderParameter_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Name` is the header name.
+ Name string
+ // `Description` is a short description of the header.
+ Description string
+ // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ // See: https://swagger.io/specification/v2/#parameterType.
+ Type HeaderParameter_Type
+ // `Format` is the extending format for the previously mentioned type.
+ Format string
+ // `Required` indicates whether the header is required.
+ Required bool
+}
+
+func (b0 HeaderParameter_builder) Build() *HeaderParameter {
+ m0 := &HeaderParameter{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Name = b.Name
+ x.Description = b.Description
+ x.Type = b.Type
+ x.Format = b.Format
+ x.Required = b.Required
+ return m0
+}
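+
+// Builder usage sketch (illustrative; values are hypothetical, and
+// HeaderParameter_STRING is assumed from the HeaderParameter_Type enum
+// generated earlier in this file):
+//
+//	p := Parameters_builder{
+//		Headers: []*HeaderParameter{
+//			HeaderParameter_builder{
+//				Name:     "X-Request-Id",
+//				Type:     HeaderParameter_STRING,
+//				Required: true,
+//			}.Build(),
+//		},
+//	}.Build()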
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+type Header struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // `Description` is a short description of the header.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ // `Format` is the extending format for the previously mentioned type.
+ Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
+ // `Default` declares the value of the header that the server will use if none is provided.
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+ // Unlike JSON Schema, this value MUST conform to the defined type for the header.
+ Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"`
+ // `Pattern` See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+ Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Header) Reset() {
+ *x = Header{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Header) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Header) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Header) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *Header) GetFormat() string {
+ if x != nil {
+ return x.Format
+ }
+ return ""
+}
+
+func (x *Header) GetDefault() string {
+ if x != nil {
+ return x.Default
+ }
+ return ""
+}
+
+func (x *Header) GetPattern() string {
+ if x != nil {
+ return x.Pattern
+ }
+ return ""
+}
+
+func (x *Header) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Header) SetType(v string) {
+ x.Type = v
+}
+
+func (x *Header) SetFormat(v string) {
+ x.Format = v
+}
+
+func (x *Header) SetDefault(v string) {
+ x.Default = v
+}
+
+func (x *Header) SetPattern(v string) {
+ x.Pattern = v
+}
+
+type Header_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Description` is a short description of the header.
+ Description string
+ // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ Type string
+ // `Format` is the extending format for the previously mentioned type.
+ Format string
+ // `Default` declares the value of the header that the server will use if none is provided.
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+ // Unlike JSON Schema, this value MUST conform to the defined type for the header.
+ Default string
+ // `Pattern` See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+ Pattern string
+}
+
+func (b0 Header_builder) Build() *Header {
+ m0 := &Header{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Description = b.Description
+ x.Type = b.Type
+ x.Format = b.Format
+ x.Default = b.Default
+ x.Pattern = b.Pattern
+ return m0
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+type Response struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // `Description` is a short description of the response.
+ // GFM syntax can be used for rich text representation.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // `Schema` optionally defines the structure of the response.
+ // If `Schema` is not provided, it means there is no content to the response.
+ Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+ // `Headers` A list of headers that are sent with the response.
+ // `Header` name is expected to be a string in the canonical format of the MIME header key.
+ // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+ Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // `Examples` gives per-mimetype response examples.
+ // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+ Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Response) Reset() {
+ *x = Response{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Response) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Response) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Response) GetSchema() *Schema {
+ if x != nil {
+ return x.Schema
+ }
+ return nil
+}
+
+func (x *Response) GetHeaders() map[string]*Header {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *Response) GetExamples() map[string]string {
+ if x != nil {
+ return x.Examples
+ }
+ return nil
+}
+
+func (x *Response) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Response) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Response) SetSchema(v *Schema) {
+ x.Schema = v
+}
+
+func (x *Response) SetHeaders(v map[string]*Header) {
+ x.Headers = v
+}
+
+func (x *Response) SetExamples(v map[string]string) {
+ x.Examples = v
+}
+
+func (x *Response) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Response) HasSchema() bool {
+ if x == nil {
+ return false
+ }
+ return x.Schema != nil
+}
+
+func (x *Response) ClearSchema() {
+ x.Schema = nil
+}
+
+type Response_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Description` is a short description of the response.
+ // GFM syntax can be used for rich text representation.
+ Description string
+ // `Schema` optionally defines the structure of the response.
+ // If `Schema` is not provided, it means there is no content to the response.
+ Schema *Schema
+ // `Headers` A list of headers that are sent with the response.
+ // `Header` name is expected to be a string in the canonical format of the MIME header key.
+ // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+ Headers map[string]*Header
+ // `Examples` gives per-mimetype response examples.
+ // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+ Examples map[string]string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Response_builder) Build() *Response {
+ m0 := &Response{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Description = b.Description
+ x.Schema = b.Schema
+ x.Headers = b.Headers
+ x.Examples = b.Examples
+ x.Extensions = b.Extensions
+ return m0
+}
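+
+// Builder usage sketch (illustrative; values are hypothetical). Header map
+// keys follow the canonical MIME header format noted above:
+//
+//	resp := Response_builder{
+//		Description: "OK",
+//		Headers: map[string]*Header{
+//			"X-Rate-Limit": Header_builder{Type: "integer"}.Build(),
+//		},
+//		Examples: map[string]string{
+//			"application/json": `{"id": "1"}`,
+//		},
+//	}.Build()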
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// ...
+// };
+type Info struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // The title of the application.
+ Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+ // A short description of the application. GFM syntax can be used for rich
+ // text representation.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The Terms of Service for the API.
+ TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+ // The contact information for the exposed API.
+ Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"`
+ // The license information for the exposed API.
+ License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"`
+ // Provides the version of the application API (not to be confused
+ // with the specification version).
+ Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Info) Reset() {
+ *x = Info{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Info) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Info) GetTitle() string {
+ if x != nil {
+ return x.Title
+ }
+ return ""
+}
+
+func (x *Info) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Info) GetTermsOfService() string {
+ if x != nil {
+ return x.TermsOfService
+ }
+ return ""
+}
+
+func (x *Info) GetContact() *Contact {
+ if x != nil {
+ return x.Contact
+ }
+ return nil
+}
+
+func (x *Info) GetLicense() *License {
+ if x != nil {
+ return x.License
+ }
+ return nil
+}
+
+func (x *Info) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *Info) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Info) SetTitle(v string) {
+ x.Title = v
+}
+
+func (x *Info) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Info) SetTermsOfService(v string) {
+ x.TermsOfService = v
+}
+
+func (x *Info) SetContact(v *Contact) {
+ x.Contact = v
+}
+
+func (x *Info) SetLicense(v *License) {
+ x.License = v
+}
+
+func (x *Info) SetVersion(v string) {
+ x.Version = v
+}
+
+func (x *Info) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Info) HasContact() bool {
+ if x == nil {
+ return false
+ }
+ return x.Contact != nil
+}
+
+func (x *Info) HasLicense() bool {
+ if x == nil {
+ return false
+ }
+ return x.License != nil
+}
+
+func (x *Info) ClearContact() {
+ x.Contact = nil
+}
+
+func (x *Info) ClearLicense() {
+ x.License = nil
+}
+
+type Info_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The title of the application.
+ Title string
+ // A short description of the application. GFM syntax can be used for rich
+ // text representation.
+ Description string
+ // The Terms of Service for the API.
+ TermsOfService string
+ // The contact information for the exposed API.
+ Contact *Contact
+ // The license information for the exposed API.
+ License *License
+ // Provides the version of the application API (not to be confused
+ // with the specification version).
+ Version string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Info_builder) Build() *Info {
+ m0 := &Info{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Title = b.Title
+ x.Description = b.Description
+ x.TermsOfService = b.TermsOfService
+ x.Contact = b.Contact
+ x.License = b.License
+ x.Version = b.Version
+ x.Extensions = b.Extensions
+ return m0
+}
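+
+// Builder usage sketch (illustrative; values are taken from the annotation
+// example in the `Info` comment above):
+//
+//	info := Info_builder{
+//		Title:   "Echo API",
+//		Version: "1.0",
+//		Contact: Contact_builder{
+//			Name:  "gRPC-Gateway project",
+//			Url:   "https://github.com/grpc-ecosystem/grpc-gateway",
+//			Email: "none@example.com",
+//		}.Build(),
+//	}.Build()
+//	_ = info.HasLicense() // false (License not set)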
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// ...
+// };
+// ...
+// };
+type Contact struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // The identifying name of the contact person/organization.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The URL pointing to the contact information. MUST be in the format of a
+ // URL.
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ // The email address of the contact person/organization. MUST be in the format
+ // of an email address.
+ Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Contact) Reset() {
+ *x = Contact{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Contact) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Contact) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Contact) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *Contact) GetEmail() string {
+ if x != nil {
+ return x.Email
+ }
+ return ""
+}
+
+func (x *Contact) SetName(v string) {
+ x.Name = v
+}
+
+func (x *Contact) SetUrl(v string) {
+ x.Url = v
+}
+
+func (x *Contact) SetEmail(v string) {
+ x.Email = v
+}
+
+type Contact_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The identifying name of the contact person/organization.
+ Name string
+ // The URL pointing to the contact information. MUST be in the format of a
+ // URL.
+ Url string
+ // The email address of the contact person/organization. MUST be in the format
+ // of an email address.
+ Email string
+}
+
+func (b0 Contact_builder) Build() *Contact {
+ m0 := &Contact{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Name = b.Name
+ x.Url = b.Url
+ x.Email = b.Email
+ return m0
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// ...
+// };
+// ...
+// };
+type License struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // The license name used for the API.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A URL to the license used for the API. MUST be in the format of a URL.
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *License) Reset() {
+ *x = License{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *License) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *License) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *License) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *License) SetName(v string) {
+ x.Name = v
+}
+
+func (x *License) SetUrl(v string) {
+ x.Url = v
+}
+
+type License_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The license name used for the API.
+ Name string
+ // A URL to the license used for the API. MUST be in the format of a URL.
+ Url string
+}
+
+func (b0 License_builder) Build() *License {
+ m0 := &License{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Name = b.Name
+ x.Url = b.Url
+ return m0
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// ...
+// external_docs: {
+// description: "More about gRPC-Gateway";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// }
+// ...
+// };
+type ExternalDocumentation struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // A short description of the target documentation. GFM syntax can be used for
+ // rich text representation.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // The URL for the target documentation. Value MUST be in the format
+ // of a URL.
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExternalDocumentation) Reset() {
+ *x = ExternalDocumentation{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExternalDocumentation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExternalDocumentation) ProtoMessage() {}
+
+func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *ExternalDocumentation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *ExternalDocumentation) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *ExternalDocumentation) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *ExternalDocumentation) SetUrl(v string) {
+ x.Url = v
+}
+
+type ExternalDocumentation_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A short description of the target documentation. GFM syntax can be used for
+ // rich text representation.
+ Description string
+ // The URL for the target documentation. Value MUST be in the format
+ // of a URL.
+ Url string
+}
+
+func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation {
+ m0 := &ExternalDocumentation{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Description = b.Description
+ x.Url = b.Url
+ return m0
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+type Schema struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"`
+ // Adds support for polymorphism. The discriminator is the schema property
+ // name that is used to differentiate between other schema that inherit this
+ // schema. The property name used MUST be defined at this schema and it MUST
+ // be in the required property list. When used, the value MUST be the name of
+ // this schema or any schema that inherits it.
+ Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+ // Relevant only for Schema "properties" definitions. Declares the property as
+ // "read only". This means that it MAY be sent as part of a response but MUST
+ // NOT be sent as part of the request. Properties marked as readOnly being
+ // true SHOULD NOT be in the required list of the defined schema. Default
+ // value is false.
+ ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ // A free-form property to include an example of an instance for this schema in JSON.
+ // This is copied verbatim to the output.
+ Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Schema) Reset() {
+ *x = Schema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Schema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Schema) GetJsonSchema() *JSONSchema {
+ if x != nil {
+ return x.JsonSchema
+ }
+ return nil
+}
+
+func (x *Schema) GetDiscriminator() string {
+ if x != nil {
+ return x.Discriminator
+ }
+ return ""
+}
+
+func (x *Schema) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
+ }
+ return false
+}
+
+func (x *Schema) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *Schema) GetExample() string {
+ if x != nil {
+ return x.Example
+ }
+ return ""
+}
+
+func (x *Schema) SetJsonSchema(v *JSONSchema) {
+ x.JsonSchema = v
+}
+
+func (x *Schema) SetDiscriminator(v string) {
+ x.Discriminator = v
+}
+
+func (x *Schema) SetReadOnly(v bool) {
+ x.ReadOnly = v
+}
+
+func (x *Schema) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *Schema) SetExample(v string) {
+ x.Example = v
+}
+
+func (x *Schema) HasJsonSchema() bool {
+ if x == nil {
+ return false
+ }
+ return x.JsonSchema != nil
+}
+
+func (x *Schema) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *Schema) ClearJsonSchema() {
+ x.JsonSchema = nil
+}
+
+func (x *Schema) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+type Schema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ JsonSchema *JSONSchema
+ // Adds support for polymorphism. The discriminator is the schema property
+ // name that is used to differentiate between other schema that inherit this
+ // schema. The property name used MUST be defined at this schema and it MUST
+ // be in the required property list. When used, the value MUST be the name of
+ // this schema or any schema that inherits it.
+ Discriminator string
+ // Relevant only for Schema "properties" definitions. Declares the property as
+ // "read only". This means that it MAY be sent as part of a response but MUST
+ // NOT be sent as part of the request. Properties marked as readOnly being
+ // true SHOULD NOT be in the required list of the defined schema. Default
+ // value is false.
+ ReadOnly bool
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation
+ // A free-form property to include an example of an instance for this schema in JSON.
+ // This is copied verbatim to the output.
+ Example string
+}
+
+func (b0 Schema_builder) Build() *Schema {
+ m0 := &Schema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.JsonSchema = b.JsonSchema
+ x.Discriminator = b.Discriminator
+ x.ReadOnly = b.ReadOnly
+ x.ExternalDocs = b.ExternalDocs
+ x.Example = b.Example
+ return m0
+}
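+
+// Builder usage sketch (illustrative; values are hypothetical). JSONSchema is
+// the hybrid-API message generated below, so its struct literal can be used
+// directly:
+//
+//	sch := Schema_builder{
+//		JsonSchema: &JSONSchema{Title: "SimpleMessage", Required: []string{"id"}},
+//		ReadOnly:   true,
+//	}.Build()
+//	_ = sch.HasJsonSchema() // true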
+
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to Enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+// ...
+// title: "MyEnum";
+// description:"This is my nice enum";
+// example: "ZERO";
+// required: true;
+// ...
+// };
+type EnumSchema struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // A short description of the schema.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
+ // The title of the schema.
+ Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+ Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+ ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"`
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EnumSchema) Reset() {
+ *x = EnumSchema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EnumSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumSchema) ProtoMessage() {}
+
+func (x *EnumSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *EnumSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetDefault() string {
+ if x != nil {
+ return x.Default
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetTitle() string {
+ if x != nil {
+ return x.Title
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRequired() bool {
+ if x != nil {
+ return x.Required
+ }
+ return false
+}
+
+func (x *EnumSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
+ }
+ return false
+}
+
+func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *EnumSchema) GetExample() string {
+ if x != nil {
+ return x.Example
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRef() string {
+ if x != nil {
+ return x.Ref
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *EnumSchema) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *EnumSchema) SetDefault(v string) {
+ x.Default = v
+}
+
+func (x *EnumSchema) SetTitle(v string) {
+ x.Title = v
+}
+
+func (x *EnumSchema) SetRequired(v bool) {
+ x.Required = v
+}
+
+func (x *EnumSchema) SetReadOnly(v bool) {
+ x.ReadOnly = v
+}
+
+func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *EnumSchema) SetExample(v string) {
+ x.Example = v
+}
+
+func (x *EnumSchema) SetRef(v string) {
+ x.Ref = v
+}
+
+func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *EnumSchema) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *EnumSchema) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+type EnumSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A short description of the schema.
+ Description string
+ Default string
+ // The title of the schema.
+ Title string
+ Required bool
+ ReadOnly bool
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation
+ Example string
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+ m0 := &EnumSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Description = b.Description
+ x.Default = b.Default
+ x.Title = b.Title
+ x.Required = b.Required
+ x.ReadOnly = b.ReadOnly
+ x.ExternalDocs = b.ExternalDocs
+ x.Example = b.Example
+ x.Ref = b.Ref
+ x.Extensions = b.Extensions
+ return m0
+}
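+
+// Builder usage sketch (illustrative), mirroring the annotation example in the
+// `EnumSchema` comment above:
+//
+//	es := EnumSchema_builder{
+//		Title:       "MyEnum",
+//		Description: "This is my nice enum",
+//		Example:     "ZERO",
+//		Required:    true,
+//	}.Build()
+//	_ = es.GetTitle() // "MyEnum"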
+
+// `JSONSchema` represents the properties taken from JSON Schema, as used in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+// message SimpleMessage {
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+// json_schema: {
+// title: "SimpleMessage"
+// description: "A simple message."
+// required: ["id"]
+// }
+// };
+//
+// // Id represents the message identifier.
+// string id = 1 [
+// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+// description: "The unique identifier of the simple message."
+// }];
+// }
+type JSONSchema struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+ // The title of the schema.
+ Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+ // A short description of the schema.
+ Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+ Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+ ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ // A free-form property to include a JSON example of this field. This is copied
+ // verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0:
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+ Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+ MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value of `maximum` MUST be a number.
+ Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value of `minimum` MUST be a number.
+ Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+ MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+ Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+ // Items in 'array' must be unique.
+ Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+ Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+	// `Format` further describes the data type of the schema; see the OpenAPI
+	// v2 Data Type Formats for details.
+ Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+	// Items in `enum` must be unique.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+ Enum []string `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *JSONSchema) Reset() {
+ *x = JSONSchema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema) ProtoMessage() {}
+
+func (x *JSONSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *JSONSchema) GetRef() string {
+ if x != nil {
+ return x.Ref
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetTitle() string {
+ if x != nil {
+ return x.Title
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetDefault() string {
+ if x != nil {
+ return x.Default
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
+ }
+ return false
+}
+
+func (x *JSONSchema) GetExample() string {
+ if x != nil {
+ return x.Example
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetMultipleOf() float64 {
+ if x != nil {
+ return x.MultipleOf
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMaximum() float64 {
+ if x != nil {
+ return x.Maximum
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.ExclusiveMaximum
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMinimum() float64 {
+ if x != nil {
+ return x.Minimum
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.ExclusiveMinimum
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMaxLength() uint64 {
+ if x != nil {
+ return x.MaxLength
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinLength() uint64 {
+ if x != nil {
+ return x.MinLength
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetPattern() string {
+ if x != nil {
+ return x.Pattern
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetMaxItems() uint64 {
+ if x != nil {
+ return x.MaxItems
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinItems() uint64 {
+ if x != nil {
+ return x.MinItems
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetUniqueItems() bool {
+ if x != nil {
+ return x.UniqueItems
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMaxProperties() uint64 {
+ if x != nil {
+ return x.MaxProperties
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinProperties() uint64 {
+ if x != nil {
+ return x.MinProperties
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetRequired() []string {
+ if x != nil {
+ return x.Required
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetArray() []string {
+ if x != nil {
+ return x.Array
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetFormat() string {
+ if x != nil {
+ return x.Format
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetEnum() []string {
+ if x != nil {
+ return x.Enum
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration {
+ if x != nil {
+ return x.FieldConfiguration
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *JSONSchema) SetRef(v string) {
+ x.Ref = v
+}
+
+func (x *JSONSchema) SetTitle(v string) {
+ x.Title = v
+}
+
+func (x *JSONSchema) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *JSONSchema) SetDefault(v string) {
+ x.Default = v
+}
+
+func (x *JSONSchema) SetReadOnly(v bool) {
+ x.ReadOnly = v
+}
+
+func (x *JSONSchema) SetExample(v string) {
+ x.Example = v
+}
+
+func (x *JSONSchema) SetMultipleOf(v float64) {
+ x.MultipleOf = v
+}
+
+func (x *JSONSchema) SetMaximum(v float64) {
+ x.Maximum = v
+}
+
+func (x *JSONSchema) SetExclusiveMaximum(v bool) {
+ x.ExclusiveMaximum = v
+}
+
+func (x *JSONSchema) SetMinimum(v float64) {
+ x.Minimum = v
+}
+
+func (x *JSONSchema) SetExclusiveMinimum(v bool) {
+ x.ExclusiveMinimum = v
+}
+
+func (x *JSONSchema) SetMaxLength(v uint64) {
+ x.MaxLength = v
+}
+
+func (x *JSONSchema) SetMinLength(v uint64) {
+ x.MinLength = v
+}
+
+func (x *JSONSchema) SetPattern(v string) {
+ x.Pattern = v
+}
+
+func (x *JSONSchema) SetMaxItems(v uint64) {
+ x.MaxItems = v
+}
+
+func (x *JSONSchema) SetMinItems(v uint64) {
+ x.MinItems = v
+}
+
+func (x *JSONSchema) SetUniqueItems(v bool) {
+ x.UniqueItems = v
+}
+
+func (x *JSONSchema) SetMaxProperties(v uint64) {
+ x.MaxProperties = v
+}
+
+func (x *JSONSchema) SetMinProperties(v uint64) {
+ x.MinProperties = v
+}
+
+func (x *JSONSchema) SetRequired(v []string) {
+ x.Required = v
+}
+
+func (x *JSONSchema) SetArray(v []string) {
+ x.Array = v
+}
+
+func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) {
+ x.Type = v
+}
+
+func (x *JSONSchema) SetFormat(v string) {
+ x.Format = v
+}
+
+func (x *JSONSchema) SetEnum(v []string) {
+ x.Enum = v
+}
+
+func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) {
+ x.FieldConfiguration = v
+}
+
+func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *JSONSchema) HasFieldConfiguration() bool {
+ if x == nil {
+ return false
+ }
+ return x.FieldConfiguration != nil
+}
+
+func (x *JSONSchema) ClearFieldConfiguration() {
+ x.FieldConfiguration = nil
+}
+
+type JSONSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // The title of the schema.
+ Title string
+ // A short description of the schema.
+ Description string
+ Default string
+ ReadOnly bool
+ // A free-form property to include a JSON example of this field. This is copied
+ // verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0:
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+ Example string
+ MultipleOf float64
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value of `maximum` MUST be a number.
+ Maximum float64
+ ExclusiveMaximum bool
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value of `minimum` MUST be a number.
+ Minimum float64
+ ExclusiveMinimum bool
+ MaxLength uint64
+ MinLength uint64
+ Pattern string
+ MaxItems uint64
+ MinItems uint64
+ UniqueItems bool
+ MaxProperties uint64
+ MinProperties uint64
+ Required []string
+ // Items in 'array' must be unique.
+ Array []string
+ Type []JSONSchema_JSONSchemaSimpleTypes
+	// `Format` further describes the data type of the schema; see the OpenAPI
+	// v2 Data Type Formats for details.
+ Format string
+	// Items in `enum` must be unique.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+ Enum []string
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration *JSONSchema_FieldConfiguration
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+ m0 := &JSONSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Ref = b.Ref
+ x.Title = b.Title
+ x.Description = b.Description
+ x.Default = b.Default
+ x.ReadOnly = b.ReadOnly
+ x.Example = b.Example
+ x.MultipleOf = b.MultipleOf
+ x.Maximum = b.Maximum
+ x.ExclusiveMaximum = b.ExclusiveMaximum
+ x.Minimum = b.Minimum
+ x.ExclusiveMinimum = b.ExclusiveMinimum
+ x.MaxLength = b.MaxLength
+ x.MinLength = b.MinLength
+ x.Pattern = b.Pattern
+ x.MaxItems = b.MaxItems
+ x.MinItems = b.MinItems
+ x.UniqueItems = b.UniqueItems
+ x.MaxProperties = b.MaxProperties
+ x.MinProperties = b.MinProperties
+ x.Required = b.Required
+ x.Array = b.Array
+ x.Type = b.Type
+ x.Format = b.Format
+ x.Enum = b.Enum
+ x.FieldConfiguration = b.FieldConfiguration
+ x.Extensions = b.Extensions
+ return m0
+}
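+
+// Illustrative sketch (hypothetical helper, not generated code): building a
+// JSONSchema that carries typical validation keywords. Zero-valued builder
+// fields simply leave the corresponding keywords unset. The enum constant
+// below follows the generated JSONSchema_* naming for JSONSchemaSimpleTypes.
+func exampleJSONSchemaBuilder() *JSONSchema {
+	return JSONSchema_builder{
+		Title:       "Port",
+		Description: "A TCP port number.",
+		Type:        []JSONSchema_JSONSchemaSimpleTypes{JSONSchema_INTEGER},
+		Format:      "int64",
+		Minimum:     1,
+		Maximum:     65535,
+		Required:    []string{"port"},
+	}.Build()
+}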
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+type Tag struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The name of the tag. Use it to override the name of a global Tag
+	// object, and then to reference the tag throughout the OpenAPI file.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A short description for the tag. GFM syntax can be used for rich text
+ // representation.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // Additional external documentation for this tag.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Tag) Reset() {
+ *x = Tag{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Tag) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tag) ProtoMessage() {}
+
+func (x *Tag) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Tag) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Tag) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *Tag) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Tag) SetName(v string) {
+ x.Name = v
+}
+
+func (x *Tag) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Tag) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *Tag) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Tag) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *Tag) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+type Tag_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The name of the tag. Use it to override the name of a global Tag
+	// object, and then to reference the tag throughout the OpenAPI file.
+ Name string
+ // A short description for the tag. GFM syntax can be used for rich text
+ // representation.
+ Description string
+ // Additional external documentation for this tag.
+ ExternalDocs *ExternalDocumentation
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Tag_builder) Build() *Tag {
+ m0 := &Tag{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Name = b.Name
+ x.Description = b.Description
+ x.ExternalDocs = b.ExternalDocs
+ x.Extensions = b.Extensions
+ return m0
+}
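+
+// Illustrative sketch (hypothetical helper): populating a Tag's Extensions
+// map with "x-" properties via the structpb constructors already imported by
+// this file.
+func exampleTagWithExtensions() *Tag {
+	return Tag_builder{
+		Name:        "users",
+		Description: "Operations on user resources.",
+		Extensions: map[string]*structpb.Value{
+			"x-traitTag": structpb.NewBoolValue(true),
+			"x-owner":    structpb.NewStringValue("identity-team"),
+		},
+	}.Build()
+}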
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+type SecurityDefinitions struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // A single security scheme definition, mapping a "name" to the scheme it
+ // defines.
+ Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityDefinitions) Reset() {
+ *x = SecurityDefinitions{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityDefinitions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme {
+ if x != nil {
+ return x.Security
+ }
+ return nil
+}
+
+func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) {
+ x.Security = v
+}
+
+type SecurityDefinitions_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A single security scheme definition, mapping a "name" to the scheme it
+ // defines.
+ Security map[string]*SecurityScheme
+}
+
+func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions {
+ m0 := &SecurityDefinitions{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Security = b.Security
+ return m0
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+type SecurityScheme struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"`
+	// A short description for the security scheme.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The name of the header or query parameter to be used.
+ // Valid for apiKey.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // The location of the API key. Valid values are "query" or
+ // "header".
+ // Valid for apiKey.
+ In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"`
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ // Valid for oauth2.
+ Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"`
+ // The authorization URL to be used for this flow. This SHOULD be in
+ // the form of a URL.
+ // Valid for oauth2/implicit and oauth2/accessCode.
+ AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+ // The token URL to be used for this flow. This SHOULD be in the
+ // form of a URL.
+ // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+ TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+ // The available scopes for the OAuth2 security scheme.
+ // Valid for oauth2.
+ Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityScheme) Reset() {
+ *x = SecurityScheme{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityScheme) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityScheme) ProtoMessage() {}
+
+func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityScheme) GetType() SecurityScheme_Type {
+ if x != nil {
+ return x.Type
+ }
+ return SecurityScheme_TYPE_INVALID
+}
+
+func (x *SecurityScheme) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetIn() SecurityScheme_In {
+ if x != nil {
+ return x.In
+ }
+ return SecurityScheme_IN_INVALID
+}
+
+func (x *SecurityScheme) GetFlow() SecurityScheme_Flow {
+ if x != nil {
+ return x.Flow
+ }
+ return SecurityScheme_FLOW_INVALID
+}
+
+func (x *SecurityScheme) GetAuthorizationUrl() string {
+ if x != nil {
+ return x.AuthorizationUrl
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetTokenUrl() string {
+ if x != nil {
+ return x.TokenUrl
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetScopes() *Scopes {
+ if x != nil {
+ return x.Scopes
+ }
+ return nil
+}
+
+func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *SecurityScheme) SetType(v SecurityScheme_Type) {
+ x.Type = v
+}
+
+func (x *SecurityScheme) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *SecurityScheme) SetName(v string) {
+ x.Name = v
+}
+
+func (x *SecurityScheme) SetIn(v SecurityScheme_In) {
+ x.In = v
+}
+
+func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) {
+ x.Flow = v
+}
+
+func (x *SecurityScheme) SetAuthorizationUrl(v string) {
+ x.AuthorizationUrl = v
+}
+
+func (x *SecurityScheme) SetTokenUrl(v string) {
+ x.TokenUrl = v
+}
+
+func (x *SecurityScheme) SetScopes(v *Scopes) {
+ x.Scopes = v
+}
+
+func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *SecurityScheme) HasScopes() bool {
+ if x == nil {
+ return false
+ }
+ return x.Scopes != nil
+}
+
+func (x *SecurityScheme) ClearScopes() {
+ x.Scopes = nil
+}
+
+type SecurityScheme_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ Type SecurityScheme_Type
+	// A short description for the security scheme.
+ Description string
+ // The name of the header or query parameter to be used.
+ // Valid for apiKey.
+ Name string
+ // The location of the API key. Valid values are "query" or
+ // "header".
+ // Valid for apiKey.
+ In SecurityScheme_In
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ // Valid for oauth2.
+ Flow SecurityScheme_Flow
+ // The authorization URL to be used for this flow. This SHOULD be in
+ // the form of a URL.
+ // Valid for oauth2/implicit and oauth2/accessCode.
+ AuthorizationUrl string
+ // The token URL to be used for this flow. This SHOULD be in the
+ // form of a URL.
+ // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+ TokenUrl string
+ // The available scopes for the OAuth2 security scheme.
+ // Valid for oauth2.
+ Scopes *Scopes
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 SecurityScheme_builder) Build() *SecurityScheme {
+ m0 := &SecurityScheme{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Type = b.Type
+ x.Description = b.Description
+ x.Name = b.Name
+ x.In = b.In
+ x.Flow = b.Flow
+ x.AuthorizationUrl = b.AuthorizationUrl
+ x.TokenUrl = b.TokenUrl
+ x.Scopes = b.Scopes
+ x.Extensions = b.Extensions
+ return m0
+}
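+
+// Illustrative sketch (hypothetical helper): defining an OAuth2 access-code
+// scheme and registering it under a name in SecurityDefinitions. The
+// SecurityScheme_TYPE_OAUTH2 and SecurityScheme_FLOW_ACCESS_CODE constants
+// are assumed to follow the generated naming for the Type and Flow enums.
+func exampleOAuth2Definitions() *SecurityDefinitions {
+	scheme := SecurityScheme_builder{
+		Type:             SecurityScheme_TYPE_OAUTH2,
+		Flow:             SecurityScheme_FLOW_ACCESS_CODE,
+		AuthorizationUrl: "https://auth.example.com/authorize",
+		TokenUrl:         "https://auth.example.com/token",
+		Scopes: Scopes_builder{
+			Scope: map[string]string{"read": "Grants read access."},
+		}.Build(),
+	}.Build()
+	return SecurityDefinitions_builder{
+		Security: map[string]*SecurityScheme{"OAuth2": scheme},
+	}.Build()
+}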
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+type SecurityRequirement struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // Each name must correspond to a security scheme which is declared in
+ // the Security Definitions. If the security scheme is of type "oauth2",
+ // then the value is a list of scope names required for the execution.
+ // For other security scheme types, the array MUST be empty.
+ SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement) Reset() {
+ *x = SecurityRequirement{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+ if x != nil {
+ return x.SecurityRequirement
+ }
+ return nil
+}
+
+func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) {
+ x.SecurityRequirement = v
+}
+
+type SecurityRequirement_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Each name must correspond to a security scheme which is declared in
+ // the Security Definitions. If the security scheme is of type "oauth2",
+ // then the value is a list of scope names required for the execution.
+ // For other security scheme types, the array MUST be empty.
+ SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue
+}
+
+func (b0 SecurityRequirement_builder) Build() *SecurityRequirement {
+ m0 := &SecurityRequirement{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.SecurityRequirement = b.SecurityRequirement
+ return m0
+}
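+
+// Illustrative sketch (hypothetical helper): requiring an "OAuth2" scheme with
+// a specific scope. For security schemes that are not of type "oauth2", the
+// Scope slice must be left empty, per the comment above.
+func exampleSecurityRequirement() *SecurityRequirement {
+	return SecurityRequirement_builder{
+		SecurityRequirement: map[string]*SecurityRequirement_SecurityRequirementValue{
+			"OAuth2": SecurityRequirement_SecurityRequirementValue_builder{
+				Scope: []string{"read"},
+			}.Build(),
+		},
+	}.Build()
+}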
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Maps the name of a scope to a short description of it (as the value
+	// of the property).
+ Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Scopes) Reset() {
+ *x = Scopes{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Scopes) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Scopes) ProtoMessage() {}
+
+func (x *Scopes) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Scopes) GetScope() map[string]string {
+ if x != nil {
+ return x.Scope
+ }
+ return nil
+}
+
+func (x *Scopes) SetScope(v map[string]string) {
+ x.Scope = v
+}
+
+type Scopes_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Maps the name of a scope to a short description of it (as the value
+	// of the property).
+ Scope map[string]string
+}
+
+func (b0 Scopes_builder) Build() *Scopes {
+ m0 := &Scopes{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Scope = b.Scope
+ return m0
+}
+
+// `FieldConfiguration` provides additional field-level properties used when generating the OpenAPI v2 file.
+// These properties are not defined by OpenAPI v2, but they are used to control the generation.
+type JSONSchema_FieldConfiguration struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Alternative parameter name when used as a path parameter. If set, this will
+	// be used as the complete parameter name when this field is used as a path
+	// parameter. Use this to avoid having auto-generated path parameter names
+	// for overlapping paths.
+ PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *JSONSchema_FieldConfiguration) Reset() {
+ *x = JSONSchema_FieldConfiguration{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema_FieldConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema_FieldConfiguration) ProtoMessage() {}
+
+func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *JSONSchema_FieldConfiguration) GetPathParamName() string {
+ if x != nil {
+ return x.PathParamName
+ }
+ return ""
+}
+
+func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) {
+ x.PathParamName = v
+}
+
+type JSONSchema_FieldConfiguration_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Alternative parameter name when used as a path parameter. If set, this will
+	// be used as the complete parameter name when this field is used as a path
+	// parameter. Use this to avoid having auto-generated path parameter names
+	// for overlapping paths.
+ PathParamName string
+}
+
+func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration {
+ m0 := &JSONSchema_FieldConfiguration{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.PathParamName = b.PathParamName
+ return m0
+}
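+
+// Illustrative sketch (hypothetical helper): overriding an auto-generated path
+// parameter name by attaching a FieldConfiguration to a JSONSchema.
+func examplePathParamOverride() *JSONSchema {
+	return JSONSchema_builder{
+		FieldConfiguration: JSONSchema_FieldConfiguration_builder{
+			PathParamName: "userId",
+		}.Build(),
+	}.Build()
+}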
+
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) Reset() {
+ *x = SecurityRequirement_SecurityRequirementValue{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+
+func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+ if x != nil {
+ return x.Scope
+ }
+ return nil
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) {
+ x.Scope = v
+}
+
+type SecurityRequirement_SecurityRequirementValue_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ Scope []string
+}
+
+func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue {
+ m0 := &SecurityRequirement_SecurityRequirementValue{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Scope = b.Scope
+ return m0
+}
+
+var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{
+ 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67,
+ 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a,
+ 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e,
+ 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70,
+ 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50,
+ 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73,
+ 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+ 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+ 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69,
+ 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73,
+ 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d,
+ 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+ 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07,
+ 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74,
+ 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12,
+ 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f,
+ 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+ 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+ 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a,
+ 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+ 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
+ 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a,
+ 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52,
+ 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
+ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+ 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+ 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a,
+ 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e,
+ 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45,
+ 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
+ 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08,
+ 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a,
+ 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10,
+ 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08,
+ 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a,
+ 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+ 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66,
+ 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72,
+ 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63,
+ 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c,
+ 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12,
+ 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75,
+ 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65,
+ 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73,
+ 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12,
+ 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d,
+ 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+ 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08,
+ 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+ 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a,
+ 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10,
+ 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66,
+ 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7,
+ 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a,
+ 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12,
+ 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18,
+ 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d,
+ 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78,
+ 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69,
+ 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+ 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+ 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+ 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78,
+ 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18,
+ 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+ 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78,
+ 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12,
+ 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09,
+ 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69,
+ 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e,
+ 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e,
+ 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18,
+ 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70,
+ 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+ 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74,
+ 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
+ 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59,
+ 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12,
+ 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
+ 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+ 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+ 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+ 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04,
+ 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e,
+ 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a,
+ 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a,
+ 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61,
+ 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+ 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65,
+ 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+ 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67,
+ 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff,
+ 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f,
+ 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a,
+ 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75,
+ 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65,
+ 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70,
+ 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+ 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c,
+ 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10,
+ 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10,
+ 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e,
+ 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55,
+ 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44,
+ 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c,
+ 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11,
+ 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+ 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f,
+ 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50,
+ 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c,
+ 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04,
+ 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63,
+ 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70,
+ 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07,
+ 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54,
+ 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06,
+ 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42,
+ 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72,
+ 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70,
+ 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{
+ (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+ (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+ (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+ (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+ (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+ (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+ (*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters
+ (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+ (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header
+ (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response
+ (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info
+ (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact
+ (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License
+ (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema
+ (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag
+ (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+ (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+ (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes
+ nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+ nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+ nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+ nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+ nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+ nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+ nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+ nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+ nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+ (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+ nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+ nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+ nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+ nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+ (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+ nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+ nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+ (*structpb.Value)(nil), // 41: google.protobuf.Value
+}
+var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{
+ 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info
+ 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+ 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+ 22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+ 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+ 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+ 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+ 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters
+ 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+ 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+ 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+ 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+ 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+ 30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+ 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact
+ 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License
+ 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+ 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+ 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+ 33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+ 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+ 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+ 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+ 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+ 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+ 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+ 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes
+ 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+ 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+ 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+ 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+ 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+ 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header
+ 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+ 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 38, // 52: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+ 53, // [53:53] is the sub-list for method output_type
+ 53, // [53:53] is the sub-list for method input_type
+ 53, // [53:53] is the sub-list for extension type_name
+ 53, // [53:53] is the sub-list for extension extendee
+ 0, // [0:53] is the sub-list for field type_name
+}
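+
+// Note (editorial, not emitted by protoc-gen-go): each value in depIdxs is an
+// index into the goTypes slice above; e.g. entry 0 is 12 because Swagger.info
+// has type Info, which sits at goTypes[12] as (*Info)(nil).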
+
+func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() }
+func file_protoc_gen_openapiv2_options_openapiv2_proto_init() {
+ if File_protoc_gen_openapiv2_options_openapiv2_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc,
+ NumEnums: 6,
+ NumMessages: 35,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes,
+ DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs,
+ EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes,
+ MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes,
+ }.Build()
+ File_protoc_gen_openapiv2_options_openapiv2_proto = out.File
+ file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil
+ file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil
+ file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil
+}
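
For orientation, a minimal sketch of how a consumer could read these options at
runtime. It assumes the extension descriptor `E_Openapiv2Operation` declared
alongside this file in `annotations.pb.go`; the method name used below is a
placeholder, not a real registered service.

```go
package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

// operationSummary returns the summary from the openapiv2_operation option
// attached to a method descriptor, if one is present.
func operationSummary(md protoreflect.MethodDescriptor) (string, bool) {
	opts, ok := md.Options().(*descriptorpb.MethodOptions)
	if !ok || opts == nil {
		return "", false
	}
	// proto.GetExtension returns the zero value when the extension is absent.
	op, ok := proto.GetExtension(opts, options.E_Openapiv2Operation).(*options.Operation)
	if !ok || op == nil {
		return "", false
	}
	return op.GetSummary(), true
}

func main() {
	// "example.v1.EchoService.Echo" is a placeholder; substitute the full name
	// of a method whose generated package is linked into this binary.
	name := protoreflect.FullName("example.v1.EchoService.Echo")
	d, err := protoregistry.GlobalFiles.FindDescriptorByName(name)
	if err != nil {
		fmt.Println("method not registered:", err)
		return
	}
	if md, ok := d.(protoreflect.MethodDescriptor); ok {
		if summary, ok := operationSummary(md); ok {
			fmt.Println("summary:", summary)
		}
	}
}
```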
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto
new file mode 100644
index 00000000000..5313f0818ae
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto
@@ -0,0 +1,759 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_openapiv2.options;
+
+import "google/protobuf/struct.proto";
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options";
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+enum Scheme {
+ UNKNOWN = 0;
+ HTTP = 1;
+ HTTPS = 2;
+ WS = 3;
+ WSS = 4;
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// schemes: HTTPS;
+// consumes: "application/json";
+// produces: "application/json";
+// };
+//
+message Swagger {
+ // Specifies the OpenAPI Specification version being used. It can be
+ // used by the OpenAPI UI and other clients to interpret the API listing. The
+ // value MUST be "2.0".
+ string swagger = 1;
+ // Provides metadata about the API. The metadata can be used by the
+ // clients if needed.
+ Info info = 2;
+ // The host (name or ip) serving the API. This MUST be the host only and does
+ // not include the scheme nor sub-paths. It MAY include a port. If the host is
+ // not included, the host serving the documentation is to be used (including
+ // the port). The host does not support path templating.
+ string host = 3;
+ // The base path on which the API is served, which is relative to the host. If
+ // it is not included, the API is served directly under the host. The value
+ // MUST start with a leading slash (/). The basePath does not support path
+ // templating.
+ // Note that using `base_path` does not change the endpoint paths that are
+ // generated in the resulting OpenAPI file. If you wish to use `base_path`
+ // with relatively generated OpenAPI paths, the `base_path` prefix must be
+ // manually removed from your `google.api.http` paths and your code changed to
+ // serve the API from the `base_path`.
+ string base_path = 4;
+ // The transfer protocol of the API. Values MUST be from the list: "http",
+ // "https", "ws", "wss". If the schemes is not included, the default scheme to
+ // be used is the one used to access the OpenAPI definition itself.
+ repeated Scheme schemes = 5;
+ // A list of MIME types the APIs can consume. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ repeated string consumes = 6;
+ // A list of MIME types the APIs can produce. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ repeated string produces = 7;
+ // field 8 is reserved for 'paths'.
+ reserved 8;
+ // field 9 is reserved for 'definitions', which at this time are already
+ // exposed as and customizable as proto messages.
+ reserved 9;
+ // An object to hold responses that can be used across operations. This
+ // property does not define global responses for all operations.
+ map<string, Response> responses = 10;
+ // Security scheme definitions that can be used across the specification.
+ SecurityDefinitions security_definitions = 11;
+ // A declaration of which security schemes are applied for the API as a whole.
+ // The list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements).
+ // Individual operations can override this definition.
+ repeated SecurityRequirement security = 12;
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ repeated Tag tags = 13;
+ // Additional external documentation.
+ ExternalDocumentation external_docs = 14;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 15;
+}
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+// service EchoService {
+// rpc Echo(SimpleMessage) returns (SimpleMessage) {
+// option (google.api.http) = {
+// get: "/v1/example/echo/{id}"
+// };
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+// summary: "Get a message.";
+// operation_id: "getMessage";
+// tags: "echo";
+// responses: {
+// key: "200"
+// value: {
+// description: "OK";
+// }
+// }
+// };
+// }
+// }
+message Operation {
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ repeated string tags = 1;
+ // A short summary of what the operation does. For maximum readability in the
+ // swagger-ui, this field SHOULD be less than 120 characters.
+ string summary = 2;
+ // A verbose explanation of the operation behavior. GFM syntax can be used for
+ // rich text representation.
+ string description = 3;
+ // Additional external documentation for this operation.
+ ExternalDocumentation external_docs = 4;
+ // Unique string used to identify the operation. The id MUST be unique among
+ // all operations described in the API. Tools and libraries MAY use the
+ // operationId to uniquely identify an operation, therefore, it is recommended
+ // to follow common programming naming conventions.
+ string operation_id = 5;
+ // A list of MIME types the operation can consume. This overrides the consumes
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ repeated string consumes = 6;
+ // A list of MIME types the operation can produce. This overrides the produces
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ repeated string produces = 7;
+ // field 8 is reserved for 'parameters'.
+ reserved 8;
+ // The list of possible responses as they are returned from executing this
+ // operation.
+ map<string, Response> responses = 9;
+ // The transfer protocol for the operation. Values MUST be from the list:
+ // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+ // schemes definition.
+ repeated Scheme schemes = 10;
+ // Declares this operation to be deprecated. Usage of the declared operation
+ // should be avoided. Default value is false.
+ bool deprecated = 11;
+ // A declaration of which security schemes are applied for this operation. The
+ // list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements). This
+ // definition overrides any declared top-level security. To remove a top-level
+ // security declaration, an empty array can be used.
+ repeated SecurityRequirement security = 12;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 13;
+ // Custom parameters such as HTTP request headers.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+ // and https://swagger.io/specification/v2/#parameter-object.
+ Parameters parameters = 14;
+}
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only
+// allow header parameters to be set here since we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
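+//
+// Example (an illustrative sketch; the header shown is a placeholder):
+//
+//   option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//     parameters: {
+//       headers: {
+//         name: "X-Request-Id";
+//         description: "Correlates a request across services.";
+//         type: STRING;
+//         required: true;
+//       };
+//     };
+//   };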
+message Parameters {
+ // `Headers` is one or more HTTP header parameters.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+ repeated HeaderParameter headers = 1;
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+message HeaderParameter {
+ // `Type` is a supported HTTP header type.
+ // See https://swagger.io/specification/v2/#parameterType.
+ enum Type {
+ UNKNOWN = 0;
+ STRING = 1;
+ NUMBER = 2;
+ INTEGER = 3;
+ BOOLEAN = 4;
+ }
+
+ // `Name` is the header name.
+ string name = 1;
+ // `Description` is a short description of the header.
+ string description = 2;
+ // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ // See: https://swagger.io/specification/v2/#parameterType.
+ Type type = 3;
+ // `Format` is the extending format for the previously mentioned type.
+ string format = 4;
+ // `Required` indicates whether the header is required.
+ bool required = 5;
+ // field 6 is reserved for 'items', but in an OpenAPI-specific way.
+ reserved 6;
+ // field 7 is reserved for `Collection Format`, which determines the format of the array if type array is used.
+ reserved 7;
+}
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+//
+message Header {
+ // `Description` is a short description of the header.
+ string description = 1;
+ // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ string type = 2;
+ // `Format` is the extending format for the previously mentioned type.
+ string format = 3;
+ // field 4 is reserved for 'items', but in an OpenAPI-specific way.
+ reserved 4;
+ // field 5 is reserved for `Collection Format`, which determines the format of the array if type array is used.
+ reserved 5;
+ // `Default` declares the value of the header that the server will use if none is provided.
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+ // Unlike JSON Schema this value MUST conform to the defined type for the header.
+ string default = 6;
+ // field 7 is reserved for 'maximum'.
+ reserved 7;
+ // field 8 is reserved for 'exclusiveMaximum'.
+ reserved 8;
+ // field 9 is reserved for 'minimum'.
+ reserved 9;
+ // field 10 is reserved for 'exclusiveMinimum'.
+ reserved 10;
+ // field 11 is reserved for 'maxLength'.
+ reserved 11;
+ // field 12 is reserved for 'minLength'.
+ reserved 12;
+ // 'Pattern': see https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+ string pattern = 13;
+ // field 14 is reserved for 'maxItems'.
+ reserved 14;
+ // field 15 is reserved for 'minItems'.
+ reserved 15;
+ // field 16 is reserved for 'uniqueItems'.
+ reserved 16;
+ // field 17 is reserved for 'enum'.
+ reserved 17;
+ // field 18 is reserved for 'multipleOf'.
+ reserved 18;
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+//
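+// Example (an illustrative sketch; the status code and text are placeholders):
+//
+//   responses: {
+//     key: "404"
+//     value: {
+//       description: "Returned when the resource does not exist.";
+//     }
+//   }
+//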
+message Response {
+ // `Description` is a short description of the response.
+ // GFM syntax can be used for rich text representation.
+ string description = 1;
+ // `Schema` optionally defines the structure of the response.
+ // If `Schema` is not provided, it means there is no content to the response.
+ Schema schema = 2;
+ // `Headers` is a list of headers that are sent with the response.
+ // `Header` name is expected to be a string in the canonical format of the MIME header key
+ // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+ map<string, Header> headers = 3;
+ // `Examples` gives per-mimetype response examples.
+ // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+ map<string, string> examples = 4;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 5;
+}
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// ...
+// };
+//
+message Info {
+ // The title of the application.
+ string title = 1;
+ // A short description of the application. GFM syntax can be used for rich
+ // text representation.
+ string description = 2;
+ // The Terms of Service for the API.
+ string terms_of_service = 3;
+ // The contact information for the exposed API.
+ Contact contact = 4;
+ // The license information for the exposed API.
+ License license = 5;
+ // Provides the version of the application API (not to be confused
+ // with the specification version).
+ string version = 6;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 7;
+}
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// ...
+// };
+// ...
+// };
+//
+message Contact {
+ // The identifying name of the contact person/organization.
+ string name = 1;
+ // The URL pointing to the contact information. MUST be in the format of a
+ // URL.
+ string url = 2;
+ // The email address of the contact person/organization. MUST be in the format
+ // of an email address.
+ string email = 3;
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// ...
+// };
+// ...
+// };
+//
+message License {
+ // The license name used for the API.
+ string name = 1;
+ // A URL to the license used for the API. MUST be in the format of a URL.
+ string url = 2;
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// ...
+// external_docs: {
+// description: "More about gRPC-Gateway";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// }
+// ...
+// };
+//
+message ExternalDocumentation {
+ // A short description of the target documentation. GFM syntax can be used for
+ // rich text representation.
+ string description = 1;
+ // The URL for the target documentation. Value MUST be in the format
+ // of a URL.
+ string url = 2;
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+message Schema {
+ JSONSchema json_schema = 1;
+ // Adds support for polymorphism. The discriminator is the schema property
+ // name that is used to differentiate between other schemas that inherit this
+ // schema. The property name used MUST be defined at this schema and it MUST
+ // be in the required property list. When used, the value MUST be the name of
+ // this schema or any schema that inherits it.
+ string discriminator = 2;
+ // Relevant only for Schema "properties" definitions. Declares the property as
+ // "read only". This means that it MAY be sent as part of a response but MUST
+ // NOT be sent as part of the request. Properties marked as readOnly being
+ // true SHOULD NOT be in the required list of the defined schema. Default
+ // value is false.
+ bool read_only = 3;
+ // field 4 is reserved for 'xml'.
+ reserved 4;
+ // Additional external documentation for this schema.
+ ExternalDocumentation external_docs = 5;
+ // A free-form property to include an example of an instance for this schema in JSON.
+ // This is copied verbatim to the output.
+ string example = 6;
+}
+
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+// ...
+// title: "MyEnum";
+// description: "This is my nice enum";
+// example: "ZERO";
+// required: true;
+// ...
+// };
+//
+message EnumSchema {
+ // A short description of the schema.
+ string description = 1;
+ string default = 2;
+ // The title of the schema.
+ string title = 3;
+ bool required = 4;
+ bool read_only = 5;
+ // Additional external documentation for this schema.
+ ExternalDocumentation external_docs = 6;
+ string example = 7;
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ // `ref: ".google.protobuf.Timestamp"`.
+ string ref = 8;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `JSONSchema` represents properties taken from JSON Schema, as used in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+// message SimpleMessage {
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+// json_schema: {
+// title: "SimpleMessage"
+// description: "A simple message."
+// required: ["id"]
+// }
+// };
+//
+// // Id represents the message identifier.
+// string id = 1 [
+// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+// description: "The unique identifier of the simple message."
+// }];
+// }
+//
+message JSONSchema {
+ // field 1 is reserved for '$id', omitted from OpenAPI v2.
+ reserved 1;
+ // field 2 is reserved for '$schema', omitted from OpenAPI v2.
+ reserved 2;
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ // `ref: ".google.protobuf.Timestamp"`.
+ string ref = 3;
+ // field 4 is reserved for '$comment', omitted from OpenAPI v2.
+ reserved 4;
+ // The title of the schema.
+ string title = 5;
+ // A short description of the schema.
+ string description = 6;
+ string default = 7;
+ bool read_only = 8;
+ // A free-form property to include a JSON example of this field. This is copied
+ // verbatim to the output swagger.json. Quotes must be escaped.
+ // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+ string example = 9;
+ double multiple_of = 10;
+ // Maximum represents an inclusive upper limit for a numeric instance. The
+ // value MUST be a number.
+ double maximum = 11;
+ bool exclusive_maximum = 12;
+ // Minimum represents an inclusive lower limit for a numeric instance. The
+ // value MUST be a number.
+ double minimum = 13;
+ bool exclusive_minimum = 14;
+ uint64 max_length = 15;
+ uint64 min_length = 16;
+ string pattern = 17;
+ // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2.
+ reserved 18;
+ // field 19 is reserved for 'items', but in an OpenAPI-specific way.
+ // TODO(ivucica): add 'items'?
+ reserved 19;
+ uint64 max_items = 20;
+ uint64 min_items = 21;
+ bool unique_items = 22;
+ // field 23 is reserved for 'contains', omitted from OpenAPI v2.
+ reserved 23;
+ uint64 max_properties = 24;
+ uint64 min_properties = 25;
+ repeated string required = 26;
+ // field 27 is reserved for 'additionalProperties', but in an OpenAPI-specific
+ // way. TODO(ivucica): add 'additionalProperties'?
+ reserved 27;
+ // field 28 is reserved for 'definitions', omitted from OpenAPI v2.
+ reserved 28;
+ // field 29 is reserved for 'properties', but in an OpenAPI-specific way.
+ // TODO(ivucica): add 'properties'?
+ reserved 29;
+ // The following fields are reserved, as the properties have been omitted from
+ // OpenAPI v2:
+ // patternProperties, dependencies, propertyNames, const
+ reserved 30 to 33;
+ // Items in 'array' must be unique.
+ repeated string array = 34;
+
+ enum JSONSchemaSimpleTypes {
+ UNKNOWN = 0;
+ ARRAY = 1;
+ BOOLEAN = 2;
+ INTEGER = 3;
+ NULL = 4;
+ NUMBER = 5;
+ OBJECT = 6;
+ STRING = 7;
+ }
+
+ repeated JSONSchemaSimpleTypes type = 35;
+ // `Format` is the extending format for the previously mentioned type.
+ string format = 36;
+ // The following fields are reserved, as the properties have been omitted from
+ // OpenAPI v2: contentMediaType, contentEncoding, if, then, else.
+ reserved 37 to 41;
+ // field 42 is reserved for 'allOf', but in an OpenAPI-specific way.
+ // TODO(ivucica): add 'allOf'?
+ reserved 42;
+ // The following fields are reserved, as the properties have been omitted from
+ // OpenAPI v2:
+ // anyOf, oneOf, not
+ reserved 43 to 45;
+ // Items in `enum` must be unique. See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1.
+ repeated string enum = 46;
+
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration field_configuration = 1001;
+
+ // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file.
+ // These properties are not defined by OpenAPIv2, but they are used to control the generation.
+ message FieldConfiguration {
+ // Alternative parameter name when used as path parameter. If set, this will
+ // be used as the complete parameter name when this field is used as a path
+ // parameter. Use this to avoid having auto generated path parameter names
+ // for overlapping paths.
+ string path_param_name = 47;
+ }
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 48;
+}
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+//
+message Tag {
+ // The name of the tag. Use it to allow override of the name of a
+ // global Tag object, then use that name to reference the tag throughout the
+ // OpenAPI file.
+ string name = 1;
+ // A short description for the tag. GFM syntax can be used for rich text
+ // representation.
+ string description = 2;
+ // Additional external documentation for this tag.
+ ExternalDocumentation external_docs = 3;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 4;
+}
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+message SecurityDefinitions {
+ // A single security scheme definition, mapping a "name" to the scheme it
+ // defines.
+ map<string, SecurityScheme> security = 1;
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
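+//
+// Example (an illustrative sketch, not from the upstream file; the scheme
+// name "ApiKeyAuth" and the "X-API-Key" header are hypothetical):
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// security_definitions: {
+// security: {
+// key: "ApiKeyAuth"
+// value: {
+// type: TYPE_API_KEY;
+// in: IN_HEADER;
+// name: "X-API-Key";
+// }
+// }
+// }
+// };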
+message SecurityScheme {
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ enum Type {
+ TYPE_INVALID = 0;
+ TYPE_BASIC = 1;
+ TYPE_API_KEY = 2;
+ TYPE_OAUTH2 = 3;
+ }
+
+ // The location of the API key. Valid values are "query" or "header".
+ enum In {
+ IN_INVALID = 0;
+ IN_QUERY = 1;
+ IN_HEADER = 2;
+ }
+
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ enum Flow {
+ FLOW_INVALID = 0;
+ FLOW_IMPLICIT = 1;
+ FLOW_PASSWORD = 2;
+ FLOW_APPLICATION = 3;
+ FLOW_ACCESS_CODE = 4;
+ }
+
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ Type type = 1;
+ // A short description for the security scheme.
+ string description = 2;
+ // The name of the header or query parameter to be used.
+ // Valid for apiKey.
+ string name = 3;
+ // The location of the API key. Valid values are "query" or
+ // "header".
+ // Valid for apiKey.
+ In in = 4;
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ // Valid for oauth2.
+ Flow flow = 5;
+ // The authorization URL to be used for this flow. This SHOULD be in
+ // the form of a URL.
+ // Valid for oauth2/implicit and oauth2/accessCode.
+ string authorization_url = 6;
+ // The token URL to be used for this flow. This SHOULD be in the
+ // form of a URL.
+ // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+ string token_url = 7;
+ // The available scopes for the OAuth2 security scheme.
+ // Valid for oauth2.
+ Scopes scopes = 8;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
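+//
+// Example (an illustrative sketch, not from the upstream file; the scheme
+// name "OAuth2" and the scopes are hypothetical and must match a declared
+// security definition):
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+// security: {
+// security_requirement: {
+// key: "OAuth2"
+// value: {
+// scope: "read";
+// scope: "write";
+// }
+// }
+// }
+// };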
+message SecurityRequirement {
+ // If the security scheme is of type "oauth2", then the value is a list of
+ // scope names required for the execution. For other security scheme types,
+ // the array MUST be empty.
+ message SecurityRequirementValue {
+ repeated string scope = 1;
+ }
+ // Each name must correspond to a security scheme which is declared in
+ // the Security Definitions. If the security scheme is of type "oauth2",
+ // then the value is a list of scope names required for the execution.
+ // For other security scheme types, the array MUST be empty.
+ map<string, SecurityRequirementValue> security_requirement = 1;
+}
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+message Scopes {
+ // Maps the name of a scope to a short description of it (as the value
+ // of the property).
+ map<string, string> scope = 1;
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
new file mode 100644
index 00000000000..1f0e0c26916
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
@@ -0,0 +1,4055 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.0
+// protoc (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+type Scheme int32
+
+const (
+ Scheme_UNKNOWN Scheme = 0
+ Scheme_HTTP Scheme = 1
+ Scheme_HTTPS Scheme = 2
+ Scheme_WS Scheme = 3
+ Scheme_WSS Scheme = 4
+)
+
+// Enum value maps for Scheme.
+var (
+ Scheme_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "HTTP",
+ 2: "HTTPS",
+ 3: "WS",
+ 4: "WSS",
+ }
+ Scheme_value = map[string]int32{
+ "UNKNOWN": 0,
+ "HTTP": 1,
+ "HTTPS": 2,
+ "WS": 3,
+ "WSS": 4,
+ }
+)
+
+func (x Scheme) Enum() *Scheme {
+ p := new(Scheme)
+ *p = x
+ return p
+}
+
+func (x Scheme) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Scheme) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor()
+}
+
+func (Scheme) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0]
+}
+
+func (x Scheme) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// `Type` is a supported HTTP header type.
+// See https://swagger.io/specification/v2/#parameterType.
+type HeaderParameter_Type int32
+
+const (
+ HeaderParameter_UNKNOWN HeaderParameter_Type = 0
+ HeaderParameter_STRING HeaderParameter_Type = 1
+ HeaderParameter_NUMBER HeaderParameter_Type = 2
+ HeaderParameter_INTEGER HeaderParameter_Type = 3
+ HeaderParameter_BOOLEAN HeaderParameter_Type = 4
+)
+
+// Enum value maps for HeaderParameter_Type.
+var (
+ HeaderParameter_Type_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "STRING",
+ 2: "NUMBER",
+ 3: "INTEGER",
+ 4: "BOOLEAN",
+ }
+ HeaderParameter_Type_value = map[string]int32{
+ "UNKNOWN": 0,
+ "STRING": 1,
+ "NUMBER": 2,
+ "INTEGER": 3,
+ "BOOLEAN": 4,
+ }
+)
+
+func (x HeaderParameter_Type) Enum() *HeaderParameter_Type {
+ p := new(HeaderParameter_Type)
+ *p = x
+ return p
+}
+
+func (x HeaderParameter_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor()
+}
+
+func (HeaderParameter_Type) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1]
+}
+
+func (x HeaderParameter_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+type JSONSchema_JSONSchemaSimpleTypes int32
+
+const (
+ JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0
+ JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1
+ JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2
+ JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3
+ JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4
+ JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5
+ JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6
+ JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7
+)
+
+// Enum value maps for JSONSchema_JSONSchemaSimpleTypes.
+var (
+ JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "ARRAY",
+ 2: "BOOLEAN",
+ 3: "INTEGER",
+ 4: "NULL",
+ 5: "NUMBER",
+ 6: "OBJECT",
+ 7: "STRING",
+ }
+ JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{
+ "UNKNOWN": 0,
+ "ARRAY": 1,
+ "BOOLEAN": 2,
+ "INTEGER": 3,
+ "NULL": 4,
+ "NUMBER": 5,
+ "OBJECT": 6,
+ "STRING": 7,
+ }
+)
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes {
+ p := new(JSONSchema_JSONSchemaSimpleTypes)
+ *p = x
+ return p
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor()
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2]
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The type of the security scheme. Valid values are "basic",
+// "apiKey" or "oauth2".
+type SecurityScheme_Type int32
+
+const (
+ SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0
+ SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1
+ SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2
+ SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3
+)
+
+// Enum value maps for SecurityScheme_Type.
+var (
+ SecurityScheme_Type_name = map[int32]string{
+ 0: "TYPE_INVALID",
+ 1: "TYPE_BASIC",
+ 2: "TYPE_API_KEY",
+ 3: "TYPE_OAUTH2",
+ }
+ SecurityScheme_Type_value = map[string]int32{
+ "TYPE_INVALID": 0,
+ "TYPE_BASIC": 1,
+ "TYPE_API_KEY": 2,
+ "TYPE_OAUTH2": 3,
+ }
+)
+
+func (x SecurityScheme_Type) Enum() *SecurityScheme_Type {
+ p := new(SecurityScheme_Type)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor()
+}
+
+func (SecurityScheme_Type) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3]
+}
+
+func (x SecurityScheme_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The location of the API key. Valid values are "query" or "header".
+type SecurityScheme_In int32
+
+const (
+ SecurityScheme_IN_INVALID SecurityScheme_In = 0
+ SecurityScheme_IN_QUERY SecurityScheme_In = 1
+ SecurityScheme_IN_HEADER SecurityScheme_In = 2
+)
+
+// Enum value maps for SecurityScheme_In.
+var (
+ SecurityScheme_In_name = map[int32]string{
+ 0: "IN_INVALID",
+ 1: "IN_QUERY",
+ 2: "IN_HEADER",
+ }
+ SecurityScheme_In_value = map[string]int32{
+ "IN_INVALID": 0,
+ "IN_QUERY": 1,
+ "IN_HEADER": 2,
+ }
+)
+
+func (x SecurityScheme_In) Enum() *SecurityScheme_In {
+ p := new(SecurityScheme_In)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_In) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor()
+}
+
+func (SecurityScheme_In) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4]
+}
+
+func (x SecurityScheme_In) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The flow used by the OAuth2 security scheme. Valid values are
+// "implicit", "password", "application" or "accessCode".
+type SecurityScheme_Flow int32
+
+const (
+ SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0
+ SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1
+ SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2
+ SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3
+ SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4
+)
+
+// Enum value maps for SecurityScheme_Flow.
+var (
+ SecurityScheme_Flow_name = map[int32]string{
+ 0: "FLOW_INVALID",
+ 1: "FLOW_IMPLICIT",
+ 2: "FLOW_PASSWORD",
+ 3: "FLOW_APPLICATION",
+ 4: "FLOW_ACCESS_CODE",
+ }
+ SecurityScheme_Flow_value = map[string]int32{
+ "FLOW_INVALID": 0,
+ "FLOW_IMPLICIT": 1,
+ "FLOW_PASSWORD": 2,
+ "FLOW_APPLICATION": 3,
+ "FLOW_ACCESS_CODE": 4,
+ }
+)
+
+func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow {
+ p := new(SecurityScheme_Flow)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_Flow) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor()
+}
+
+func (SecurityScheme_Flow) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5]
+}
+
+func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// schemes: HTTPS;
+// consumes: "application/json";
+// produces: "application/json";
+// };
+type Swagger struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+ xxx_hidden_Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+ xxx_hidden_Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+ xxx_hidden_BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+ xxx_hidden_Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+ xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+ xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ xxx_hidden_Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+ xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ xxx_hidden_Tags *[]*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Swagger) Reset() {
+ *x = Swagger{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Swagger) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Swagger) ProtoMessage() {}
+
+func (x *Swagger) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Swagger) GetSwagger() string {
+ if x != nil {
+ return x.xxx_hidden_Swagger
+ }
+ return ""
+}
+
+func (x *Swagger) GetInfo() *Info {
+ if x != nil {
+ return x.xxx_hidden_Info
+ }
+ return nil
+}
+
+func (x *Swagger) GetHost() string {
+ if x != nil {
+ return x.xxx_hidden_Host
+ }
+ return ""
+}
+
+func (x *Swagger) GetBasePath() string {
+ if x != nil {
+ return x.xxx_hidden_BasePath
+ }
+ return ""
+}
+
+func (x *Swagger) GetSchemes() []Scheme {
+ if x != nil {
+ return x.xxx_hidden_Schemes
+ }
+ return nil
+}
+
+func (x *Swagger) GetConsumes() []string {
+ if x != nil {
+ return x.xxx_hidden_Consumes
+ }
+ return nil
+}
+
+func (x *Swagger) GetProduces() []string {
+ if x != nil {
+ return x.xxx_hidden_Produces
+ }
+ return nil
+}
+
+func (x *Swagger) GetResponses() map[string]*Response {
+ if x != nil {
+ return x.xxx_hidden_Responses
+ }
+ return nil
+}
+
+func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions {
+ if x != nil {
+ return x.xxx_hidden_SecurityDefinitions
+ }
+ return nil
+}
+
+func (x *Swagger) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ if x.xxx_hidden_Security != nil {
+ return *x.xxx_hidden_Security
+ }
+ }
+ return nil
+}
+
+func (x *Swagger) GetTags() []*Tag {
+ if x != nil {
+ if x.xxx_hidden_Tags != nil {
+ return *x.xxx_hidden_Tags
+ }
+ }
+ return nil
+}
+
+func (x *Swagger) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *Swagger) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Swagger) SetSwagger(v string) {
+ x.xxx_hidden_Swagger = v
+}
+
+func (x *Swagger) SetInfo(v *Info) {
+ x.xxx_hidden_Info = v
+}
+
+func (x *Swagger) SetHost(v string) {
+ x.xxx_hidden_Host = v
+}
+
+func (x *Swagger) SetBasePath(v string) {
+ x.xxx_hidden_BasePath = v
+}
+
+func (x *Swagger) SetSchemes(v []Scheme) {
+ x.xxx_hidden_Schemes = v
+}
+
+func (x *Swagger) SetConsumes(v []string) {
+ x.xxx_hidden_Consumes = v
+}
+
+func (x *Swagger) SetProduces(v []string) {
+ x.xxx_hidden_Produces = v
+}
+
+func (x *Swagger) SetResponses(v map[string]*Response) {
+ x.xxx_hidden_Responses = v
+}
+
+func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) {
+ x.xxx_hidden_SecurityDefinitions = v
+}
+
+func (x *Swagger) SetSecurity(v []*SecurityRequirement) {
+ x.xxx_hidden_Security = &v
+}
+
+func (x *Swagger) SetTags(v []*Tag) {
+ x.xxx_hidden_Tags = &v
+}
+
+func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Swagger) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Swagger) HasInfo() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Info != nil
+}
+
+func (x *Swagger) HasSecurityDefinitions() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_SecurityDefinitions != nil
+}
+
+func (x *Swagger) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Swagger) ClearInfo() {
+ x.xxx_hidden_Info = nil
+}
+
+func (x *Swagger) ClearSecurityDefinitions() {
+ x.xxx_hidden_SecurityDefinitions = nil
+}
+
+func (x *Swagger) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+type Swagger_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Specifies the OpenAPI Specification version being used. It can be
+ // used by the OpenAPI UI and other clients to interpret the API listing. The
+ // value MUST be "2.0".
+ Swagger string
+ // Provides metadata about the API. The metadata can be used by the
+ // clients if needed.
+ Info *Info
+ // The host (name or IP) serving the API. This MUST be the host only and does
+ // not include the scheme nor sub-paths. It MAY include a port. If the host is
+ // not included, the host serving the documentation is to be used (including
+ // the port). The host does not support path templating.
+ Host string
+ // The base path on which the API is served, which is relative to the host. If
+ // it is not included, the API is served directly under the host. The value
+ // MUST start with a leading slash (/). The basePath does not support path
+ // templating.
+ // Note that using `base_path` does not change the endpoint paths that are
+ // generated in the resulting OpenAPI file. If you wish to use `base_path`
+ // with relatively generated OpenAPI paths, the `base_path` prefix must be
+ // manually removed from your `google.api.http` paths and your code changed to
+ // serve the API from the `base_path`.
+ BasePath string
+ // The transfer protocol of the API. Values MUST be from the list: "http",
+ // "https", "ws", "wss". If schemes are not included, the default scheme to
+ // be used is the one used to access the OpenAPI definition itself.
+ Schemes []Scheme
+ // A list of MIME types the APIs can consume. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Consumes []string
+ // A list of MIME types the APIs can produce. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Produces []string
+ // An object to hold responses that can be used across operations. This
+ // property does not define global responses for all operations.
+ Responses map[string]*Response
+ // Security scheme definitions that can be used across the specification.
+ SecurityDefinitions *SecurityDefinitions
+ // A declaration of which security schemes are applied for the API as a whole.
+ // The list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements).
+ // Individual operations can override this definition.
+ Security []*SecurityRequirement
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []*Tag
+ // Additional external documentation.
+ ExternalDocs *ExternalDocumentation
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Swagger_builder) Build() *Swagger {
+ m0 := &Swagger{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Swagger = b.Swagger
+ x.xxx_hidden_Info = b.Info
+ x.xxx_hidden_Host = b.Host
+ x.xxx_hidden_BasePath = b.BasePath
+ x.xxx_hidden_Schemes = b.Schemes
+ x.xxx_hidden_Consumes = b.Consumes
+ x.xxx_hidden_Produces = b.Produces
+ x.xxx_hidden_Responses = b.Responses
+ x.xxx_hidden_SecurityDefinitions = b.SecurityDefinitions
+ x.xxx_hidden_Security = &b.Security
+ x.xxx_hidden_Tags = &b.Tags
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
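+
+// Illustrative usage sketch (not part of the generated code; the host and
+// base path values are hypothetical). Under the protoopaque build tag,
+// messages are constructed through builders and accessed through methods
+// rather than direct field access:
+//
+// sw := Swagger_builder{
+// Swagger: "2.0",
+// Host: "api.example.com",
+// }.Build()
+// sw.SetBasePath("/v1")
+// _ = sw.GetHost() // "api.example.com"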
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+// service EchoService {
+// rpc Echo(SimpleMessage) returns (SimpleMessage) {
+// option (google.api.http) = {
+// get: "/v1/example/echo/{id}"
+// };
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+// summary: "Get a message.";
+// operation_id: "getMessage";
+// tags: "echo";
+// responses: {
+// key: "200"
+// value: {
+// description: "OK";
+// }
+// }
+// };
+// }
+// }
+type Operation struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+ xxx_hidden_Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+ xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+ xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ xxx_hidden_Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+ xxx_hidden_Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+ xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Operation) Reset() {
+ *x = Operation{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Operation) GetTags() []string {
+ if x != nil {
+ return x.xxx_hidden_Tags
+ }
+ return nil
+}
+
+func (x *Operation) GetSummary() string {
+ if x != nil {
+ return x.xxx_hidden_Summary
+ }
+ return ""
+}
+
+func (x *Operation) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Operation) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *Operation) GetOperationId() string {
+ if x != nil {
+ return x.xxx_hidden_OperationId
+ }
+ return ""
+}
+
+func (x *Operation) GetConsumes() []string {
+ if x != nil {
+ return x.xxx_hidden_Consumes
+ }
+ return nil
+}
+
+func (x *Operation) GetProduces() []string {
+ if x != nil {
+ return x.xxx_hidden_Produces
+ }
+ return nil
+}
+
+func (x *Operation) GetResponses() map[string]*Response {
+ if x != nil {
+ return x.xxx_hidden_Responses
+ }
+ return nil
+}
+
+func (x *Operation) GetSchemes() []Scheme {
+ if x != nil {
+ return x.xxx_hidden_Schemes
+ }
+ return nil
+}
+
+func (x *Operation) GetDeprecated() bool {
+ if x != nil {
+ return x.xxx_hidden_Deprecated
+ }
+ return false
+}
+
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ if x.xxx_hidden_Security != nil {
+ return *x.xxx_hidden_Security
+ }
+ }
+ return nil
+}
+
+func (x *Operation) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Operation) GetParameters() *Parameters {
+ if x != nil {
+ return x.xxx_hidden_Parameters
+ }
+ return nil
+}
+
+func (x *Operation) SetTags(v []string) {
+ x.xxx_hidden_Tags = v
+}
+
+func (x *Operation) SetSummary(v string) {
+ x.xxx_hidden_Summary = v
+}
+
+func (x *Operation) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Operation) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Operation) SetOperationId(v string) {
+ x.xxx_hidden_OperationId = v
+}
+
+func (x *Operation) SetConsumes(v []string) {
+ x.xxx_hidden_Consumes = v
+}
+
+func (x *Operation) SetProduces(v []string) {
+ x.xxx_hidden_Produces = v
+}
+
+func (x *Operation) SetResponses(v map[string]*Response) {
+ x.xxx_hidden_Responses = v
+}
+
+func (x *Operation) SetSchemes(v []Scheme) {
+ x.xxx_hidden_Schemes = v
+}
+
+func (x *Operation) SetDeprecated(v bool) {
+ x.xxx_hidden_Deprecated = v
+}
+
+func (x *Operation) SetSecurity(v []*SecurityRequirement) {
+ x.xxx_hidden_Security = &v
+}
+
+func (x *Operation) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Operation) SetParameters(v *Parameters) {
+ x.xxx_hidden_Parameters = v
+}
+
+func (x *Operation) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Operation) HasParameters() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Parameters != nil
+}
+
+func (x *Operation) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+func (x *Operation) ClearParameters() {
+ x.xxx_hidden_Parameters = nil
+}
+
+type Operation_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []string
+ // A short summary of what the operation does. For maximum readability in the
+ // swagger-ui, this field SHOULD be less than 120 characters.
+ Summary string
+ // A verbose explanation of the operation behavior. GFM syntax can be used for
+ // rich text representation.
+ Description string
+ // Additional external documentation for this operation.
+ ExternalDocs *ExternalDocumentation
+ // Unique string used to identify the operation. The id MUST be unique among
+ // all operations described in the API. Tools and libraries MAY use the
+ // operationId to uniquely identify an operation, therefore, it is recommended
+ // to follow common programming naming conventions.
+ OperationId string
+ // A list of MIME types the operation can consume. This overrides the consumes
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Consumes []string
+ // A list of MIME types the operation can produce. This overrides the produces
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Produces []string
+ // The list of possible responses as they are returned from executing this
+ // operation.
+ Responses map[string]*Response
+ // The transfer protocol for the operation. Values MUST be from the list:
+ // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+ // schemes definition.
+ Schemes []Scheme
+ // Declares this operation to be deprecated. Usage of the declared operation
+ // should be avoided. Default value is false.
+ Deprecated bool
+ // A declaration of which security schemes are applied for this operation. The
+ // list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements). This
+ // definition overrides any declared top-level security. To remove a top-level
+ // security declaration, an empty array can be used.
+ Security []*SecurityRequirement
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+ // Custom parameters such as HTTP request headers.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+ // and https://swagger.io/specification/v2/#parameter-object.
+ Parameters *Parameters
+}
+
+func (b0 Operation_builder) Build() *Operation {
+ m0 := &Operation{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Tags = b.Tags
+ x.xxx_hidden_Summary = b.Summary
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_OperationId = b.OperationId
+ x.xxx_hidden_Consumes = b.Consumes
+ x.xxx_hidden_Produces = b.Produces
+ x.xxx_hidden_Responses = b.Responses
+ x.xxx_hidden_Schemes = b.Schemes
+ x.xxx_hidden_Deprecated = b.Deprecated
+ x.xxx_hidden_Security = &b.Security
+ x.xxx_hidden_Extensions = b.Extensions
+ x.xxx_hidden_Parameters = b.Parameters
+ return m0
+}
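+
+// Illustrative usage sketch (not part of the generated code; the summary,
+// operation id, and tag values are hypothetical):
+//
+// op := Operation_builder{
+// Summary: "Get a message.",
+// OperationId: "getMessage",
+// Tags: []string{"echo"},
+// }.Build()
+// op.SetDeprecated(true)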
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only
+// allow header parameters to be set here since we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+type Parameters struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Headers *[]*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Parameters) Reset() {
+ *x = Parameters{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Parameters) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Parameters) ProtoMessage() {}
+
+func (x *Parameters) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Parameters) GetHeaders() []*HeaderParameter {
+ if x != nil {
+ if x.xxx_hidden_Headers != nil {
+ return *x.xxx_hidden_Headers
+ }
+ }
+ return nil
+}
+
+func (x *Parameters) SetHeaders(v []*HeaderParameter) {
+ x.xxx_hidden_Headers = &v
+}
+
+type Parameters_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Headers` is one or more HTTP header parameters.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+ Headers []*HeaderParameter
+}
+
+func (b0 Parameters_builder) Build() *Parameters {
+ m0 := &Parameters{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Headers = &b.Headers
+ return m0
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+type HeaderParameter struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"`
+ xxx_hidden_Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"`
+ xxx_hidden_Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HeaderParameter) Reset() {
+ *x = HeaderParameter{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HeaderParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameter) ProtoMessage() {}
+
+func (x *HeaderParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *HeaderParameter) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetType() HeaderParameter_Type {
+ if x != nil {
+ return x.xxx_hidden_Type
+ }
+ return HeaderParameter_UNKNOWN
+}
+
+func (x *HeaderParameter) GetFormat() string {
+ if x != nil {
+ return x.xxx_hidden_Format
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetRequired() bool {
+ if x != nil {
+ return x.xxx_hidden_Required
+ }
+ return false
+}
+
+func (x *HeaderParameter) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *HeaderParameter) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *HeaderParameter) SetType(v HeaderParameter_Type) {
+ x.xxx_hidden_Type = v
+}
+
+func (x *HeaderParameter) SetFormat(v string) {
+ x.xxx_hidden_Format = v
+}
+
+func (x *HeaderParameter) SetRequired(v bool) {
+ x.xxx_hidden_Required = v
+}
+
+type HeaderParameter_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Name` is the header name.
+ Name string
+ // `Description` is a short description of the header.
+ Description string
+ // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ // See: https://swagger.io/specification/v2/#parameterType.
+ Type HeaderParameter_Type
+ // `Format` is the extending format for the previously mentioned type.
+ Format string
+ // `Required` indicates whether the header is required.
+ Required bool
+}
+
+func (b0 HeaderParameter_builder) Build() *HeaderParameter {
+ m0 := &HeaderParameter{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Type = b.Type
+ x.xxx_hidden_Format = b.Format
+ x.xxx_hidden_Required = b.Required
+ return m0
+}
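+
+// Illustrative usage sketch (not part of the generated code; the header name
+// is hypothetical):
+//
+// p := Parameters_builder{
+// Headers: []*HeaderParameter{
+// HeaderParameter_builder{
+// Name: "X-Request-Id",
+// Type: HeaderParameter_STRING,
+// Required: true,
+// }.Build(),
+// },
+// }.Build()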
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+type Header struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ xxx_hidden_Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
+ xxx_hidden_Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"`
+ xxx_hidden_Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Header) Reset() {
+ *x = Header{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Header) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Header) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Header) GetType() string {
+ if x != nil {
+ return x.xxx_hidden_Type
+ }
+ return ""
+}
+
+func (x *Header) GetFormat() string {
+ if x != nil {
+ return x.xxx_hidden_Format
+ }
+ return ""
+}
+
+func (x *Header) GetDefault() string {
+ if x != nil {
+ return x.xxx_hidden_Default
+ }
+ return ""
+}
+
+func (x *Header) GetPattern() string {
+ if x != nil {
+ return x.xxx_hidden_Pattern
+ }
+ return ""
+}
+
+func (x *Header) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Header) SetType(v string) {
+ x.xxx_hidden_Type = v
+}
+
+func (x *Header) SetFormat(v string) {
+ x.xxx_hidden_Format = v
+}
+
+func (x *Header) SetDefault(v string) {
+ x.xxx_hidden_Default = v
+}
+
+func (x *Header) SetPattern(v string) {
+ x.xxx_hidden_Pattern = v
+}
+
+type Header_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Description` is a short description of the header.
+ Description string
+ // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ Type string
+ // `Format` is the extending format for the previously mentioned type.
+ Format string
+ // `Default` Declares the value of the header that the server will use if none is provided.
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+ // Unlike JSON Schema this value MUST conform to the defined type for the header.
+ Default string
+ // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+ Pattern string
+}
+
+func (b0 Header_builder) Build() *Header {
+ m0 := &Header{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Type = b.Type
+ x.xxx_hidden_Format = b.Format
+ x.xxx_hidden_Default = b.Default
+ x.xxx_hidden_Pattern = b.Pattern
+ return m0
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+type Response struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+ xxx_hidden_Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Response) Reset() {
+ *x = Response{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Response) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Response) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Response) GetSchema() *Schema {
+ if x != nil {
+ return x.xxx_hidden_Schema
+ }
+ return nil
+}
+
+func (x *Response) GetHeaders() map[string]*Header {
+ if x != nil {
+ return x.xxx_hidden_Headers
+ }
+ return nil
+}
+
+func (x *Response) GetExamples() map[string]string {
+ if x != nil {
+ return x.xxx_hidden_Examples
+ }
+ return nil
+}
+
+func (x *Response) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Response) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Response) SetSchema(v *Schema) {
+ x.xxx_hidden_Schema = v
+}
+
+func (x *Response) SetHeaders(v map[string]*Header) {
+ x.xxx_hidden_Headers = v
+}
+
+func (x *Response) SetExamples(v map[string]string) {
+ x.xxx_hidden_Examples = v
+}
+
+func (x *Response) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Response) HasSchema() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Schema != nil
+}
+
+func (x *Response) ClearSchema() {
+ x.xxx_hidden_Schema = nil
+}
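+
+// The opaque accessors above are nil-safe: getters return zero values on a
+// nil receiver, and message-typed fields expose Has*/Clear* for presence. A
+// minimal sketch (values illustrative):
+//
+//	r := Response_builder{Description: "OK"}.Build()
+//	_ = r.GetSchema() // nil until set
+//	r.SetSchema(Schema_builder{ReadOnly: true}.Build())
+//	if r.HasSchema() {
+//		r.ClearSchema()
+//	}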
+
+type Response_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Description` is a short description of the response.
+ // GFM syntax can be used for rich text representation.
+ Description string
+ // `Schema` optionally defines the structure of the response.
+ // If `Schema` is not provided, it means there is no content to the response.
+ Schema *Schema
+ // `Headers` lists the headers that are sent with the response.
+ // Each header name is expected to be a string in the canonical MIME header key format.
+ // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+ Headers map[string]*Header
+ // `Examples` gives per-mimetype response examples.
+ // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+ Examples map[string]string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Response_builder) Build() *Response {
+ m0 := &Response{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Schema = b.Schema
+ x.xxx_hidden_Headers = b.Headers
+ x.xxx_hidden_Examples = b.Examples
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// ...
+// };
+type Info struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+ xxx_hidden_Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"`
+ xxx_hidden_License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"`
+ xxx_hidden_Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Info) Reset() {
+ *x = Info{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Info) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Info) GetTitle() string {
+ if x != nil {
+ return x.xxx_hidden_Title
+ }
+ return ""
+}
+
+func (x *Info) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Info) GetTermsOfService() string {
+ if x != nil {
+ return x.xxx_hidden_TermsOfService
+ }
+ return ""
+}
+
+func (x *Info) GetContact() *Contact {
+ if x != nil {
+ return x.xxx_hidden_Contact
+ }
+ return nil
+}
+
+func (x *Info) GetLicense() *License {
+ if x != nil {
+ return x.xxx_hidden_License
+ }
+ return nil
+}
+
+func (x *Info) GetVersion() string {
+ if x != nil {
+ return x.xxx_hidden_Version
+ }
+ return ""
+}
+
+func (x *Info) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Info) SetTitle(v string) {
+ x.xxx_hidden_Title = v
+}
+
+func (x *Info) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Info) SetTermsOfService(v string) {
+ x.xxx_hidden_TermsOfService = v
+}
+
+func (x *Info) SetContact(v *Contact) {
+ x.xxx_hidden_Contact = v
+}
+
+func (x *Info) SetLicense(v *License) {
+ x.xxx_hidden_License = v
+}
+
+func (x *Info) SetVersion(v string) {
+ x.xxx_hidden_Version = v
+}
+
+func (x *Info) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Info) HasContact() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Contact != nil
+}
+
+func (x *Info) HasLicense() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_License != nil
+}
+
+func (x *Info) ClearContact() {
+ x.xxx_hidden_Contact = nil
+}
+
+func (x *Info) ClearLicense() {
+ x.xxx_hidden_License = nil
+}
+
+type Info_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The title of the application.
+ Title string
+ // A short description of the application. GFM syntax can be used for rich
+ // text representation.
+ Description string
+ // The Terms of Service for the API.
+ TermsOfService string
+ // The contact information for the exposed API.
+ Contact *Contact
+ // The license information for the exposed API.
+ License *License
+ // Provides the version of the application API (not to be confused
+ // with the specification version).
+ Version string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Info_builder) Build() *Info {
+ m0 := &Info{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Title = b.Title
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_TermsOfService = b.TermsOfService
+ x.xxx_hidden_Contact = b.Contact
+ x.xxx_hidden_License = b.License
+ x.xxx_hidden_Version = b.Version
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
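+
+// A sketch composing Info with nested Contact and License via builders,
+// reusing the values from the option example in the doc comment above:
+//
+//	info := Info_builder{
+//		Title:   "Echo API",
+//		Version: "1.0",
+//		Contact: Contact_builder{
+//			Name: "gRPC-Gateway project",
+//			Url:  "https://github.com/grpc-ecosystem/grpc-gateway",
+//		}.Build(),
+//		License: License_builder{
+//			Name: "BSD 3-Clause License",
+//		}.Build(),
+//	}.Build()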
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// ...
+// };
+// ...
+// };
+type Contact struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ xxx_hidden_Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Contact) Reset() {
+ *x = Contact{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Contact) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Contact) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *Contact) GetUrl() string {
+ if x != nil {
+ return x.xxx_hidden_Url
+ }
+ return ""
+}
+
+func (x *Contact) GetEmail() string {
+ if x != nil {
+ return x.xxx_hidden_Email
+ }
+ return ""
+}
+
+func (x *Contact) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *Contact) SetUrl(v string) {
+ x.xxx_hidden_Url = v
+}
+
+func (x *Contact) SetEmail(v string) {
+ x.xxx_hidden_Email = v
+}
+
+type Contact_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The identifying name of the contact person/organization.
+ Name string
+ // The URL pointing to the contact information. MUST be in the format of a
+ // URL.
+ Url string
+ // The email address of the contact person/organization. MUST be in the format
+ // of an email address.
+ Email string
+}
+
+func (b0 Contact_builder) Build() *Contact {
+ m0 := &Contact{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_Url = b.Url
+ x.xxx_hidden_Email = b.Email
+ return m0
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// ...
+// };
+// ...
+// };
+type License struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *License) Reset() {
+ *x = License{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *License) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *License) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *License) GetUrl() string {
+ if x != nil {
+ return x.xxx_hidden_Url
+ }
+ return ""
+}
+
+func (x *License) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *License) SetUrl(v string) {
+ x.xxx_hidden_Url = v
+}
+
+type License_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The license name used for the API.
+ Name string
+ // A URL to the license used for the API. MUST be in the format of a URL.
+ Url string
+}
+
+func (b0 License_builder) Build() *License {
+ m0 := &License{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_Url = b.Url
+ return m0
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// ...
+// external_docs: {
+// description: "More about gRPC-Gateway";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// }
+// ...
+// };
+type ExternalDocumentation struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExternalDocumentation) Reset() {
+ *x = ExternalDocumentation{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExternalDocumentation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExternalDocumentation) ProtoMessage() {}
+
+func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *ExternalDocumentation) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *ExternalDocumentation) GetUrl() string {
+ if x != nil {
+ return x.xxx_hidden_Url
+ }
+ return ""
+}
+
+func (x *ExternalDocumentation) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *ExternalDocumentation) SetUrl(v string) {
+ x.xxx_hidden_Url = v
+}
+
+type ExternalDocumentation_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A short description of the target documentation. GFM syntax can be used for
+ // rich text representation.
+ Description string
+ // The URL for the target documentation. Value MUST be in the format
+ // of a URL.
+ Url string
+}
+
+func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation {
+ m0 := &ExternalDocumentation{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Url = b.Url
+ return m0
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+type Schema struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"`
+ xxx_hidden_Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+ xxx_hidden_ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Schema) Reset() {
+ *x = Schema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Schema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Schema) GetJsonSchema() *JSONSchema {
+ if x != nil {
+ return x.xxx_hidden_JsonSchema
+ }
+ return nil
+}
+
+func (x *Schema) GetDiscriminator() string {
+ if x != nil {
+ return x.xxx_hidden_Discriminator
+ }
+ return ""
+}
+
+func (x *Schema) GetReadOnly() bool {
+ if x != nil {
+ return x.xxx_hidden_ReadOnly
+ }
+ return false
+}
+
+func (x *Schema) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *Schema) GetExample() string {
+ if x != nil {
+ return x.xxx_hidden_Example
+ }
+ return ""
+}
+
+func (x *Schema) SetJsonSchema(v *JSONSchema) {
+ x.xxx_hidden_JsonSchema = v
+}
+
+func (x *Schema) SetDiscriminator(v string) {
+ x.xxx_hidden_Discriminator = v
+}
+
+func (x *Schema) SetReadOnly(v bool) {
+ x.xxx_hidden_ReadOnly = v
+}
+
+func (x *Schema) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Schema) SetExample(v string) {
+ x.xxx_hidden_Example = v
+}
+
+func (x *Schema) HasJsonSchema() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_JsonSchema != nil
+}
+
+func (x *Schema) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Schema) ClearJsonSchema() {
+ x.xxx_hidden_JsonSchema = nil
+}
+
+func (x *Schema) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+type Schema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ JsonSchema *JSONSchema
+ // Adds support for polymorphism. The discriminator is the schema property
+ // name that is used to differentiate between other schemas that inherit this
+ // schema. The property name used MUST be defined at this schema and it MUST
+ // be in the required property list. When used, the value MUST be the name of
+ // this schema or any schema that inherits it.
+ Discriminator string
+ // Relevant only for Schema "properties" definitions. Declares the property as
+ // "read only". This means that it MAY be sent as part of a response but MUST
+ // NOT be sent as part of the request. Properties marked as readOnly being
+ // true SHOULD NOT be in the required list of the defined schema. Default
+ // value is false.
+ ReadOnly bool
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation
+ // A free-form property to include an example of an instance for this schema in JSON.
+ // This is copied verbatim to the output.
+ Example string
+}
+
+func (b0 Schema_builder) Build() *Schema {
+ m0 := &Schema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_JsonSchema = b.JsonSchema
+ x.xxx_hidden_Discriminator = b.Discriminator
+ x.xxx_hidden_ReadOnly = b.ReadOnly
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_Example = b.Example
+ return m0
+}
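+
+// A sketch: a Schema typically wraps a JSONSchema; the values below are
+// illustrative only.
+//
+//	s := Schema_builder{
+//		JsonSchema: JSONSchema_builder{
+//			Title:    "SimpleMessage",
+//			Required: []string{"id"},
+//		}.Build(),
+//		ReadOnly: true,
+//	}.Build()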
+
+// `EnumSchema` is a subset of the fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+// ...
+// title: "MyEnum";
+// description: "This is my nice enum";
+// example: "ZERO";
+// required: true;
+// ...
+// };
+type EnumSchema struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
+ xxx_hidden_Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+ xxx_hidden_Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+ xxx_hidden_ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"`
+ xxx_hidden_Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EnumSchema) Reset() {
+ *x = EnumSchema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EnumSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumSchema) ProtoMessage() {}
+
+func (x *EnumSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *EnumSchema) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetDefault() string {
+ if x != nil {
+ return x.xxx_hidden_Default
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetTitle() string {
+ if x != nil {
+ return x.xxx_hidden_Title
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRequired() bool {
+ if x != nil {
+ return x.xxx_hidden_Required
+ }
+ return false
+}
+
+func (x *EnumSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.xxx_hidden_ReadOnly
+ }
+ return false
+}
+
+func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *EnumSchema) GetExample() string {
+ if x != nil {
+ return x.xxx_hidden_Example
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRef() string {
+ if x != nil {
+ return x.xxx_hidden_Ref
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *EnumSchema) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *EnumSchema) SetDefault(v string) {
+ x.xxx_hidden_Default = v
+}
+
+func (x *EnumSchema) SetTitle(v string) {
+ x.xxx_hidden_Title = v
+}
+
+func (x *EnumSchema) SetRequired(v bool) {
+ x.xxx_hidden_Required = v
+}
+
+func (x *EnumSchema) SetReadOnly(v bool) {
+ x.xxx_hidden_ReadOnly = v
+}
+
+func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *EnumSchema) SetExample(v string) {
+ x.xxx_hidden_Example = v
+}
+
+func (x *EnumSchema) SetRef(v string) {
+ x.xxx_hidden_Ref = v
+}
+
+func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *EnumSchema) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *EnumSchema) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+type EnumSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A short description of the schema.
+ Description string
+ Default string
+ // The title of the schema.
+ Title string
+ Required bool
+ ReadOnly bool
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation
+ Example string
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the proto file. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+ m0 := &EnumSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Default = b.Default
+ x.xxx_hidden_Title = b.Title
+ x.xxx_hidden_Required = b.Required
+ x.xxx_hidden_ReadOnly = b.ReadOnly
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_Example = b.Example
+ x.xxx_hidden_Ref = b.Ref
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
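+
+// A sketch using the values from the openapiv2_enum option example in the doc
+// comment above:
+//
+//	es := EnumSchema_builder{
+//		Title:       "MyEnum",
+//		Description: "This is my nice enum",
+//		Example:     "ZERO",
+//		Required:    true,
+//	}.Build()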
+
+// `JSONSchema` represents the properties taken from JSON Schema, as they are
+// used in the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+// message SimpleMessage {
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+// json_schema: {
+// title: "SimpleMessage"
+// description: "A simple message."
+// required: ["id"]
+// }
+// };
+//
+// // Id represents the message identifier.
+// string id = 1; [
+// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+// description: "The unique identifier of the simple message."
+// }];
+// }
+type JSONSchema struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+ xxx_hidden_Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+ xxx_hidden_ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ xxx_hidden_Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+ xxx_hidden_MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+ xxx_hidden_Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ xxx_hidden_ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+ xxx_hidden_Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ xxx_hidden_ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ xxx_hidden_MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ xxx_hidden_MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ xxx_hidden_Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ xxx_hidden_MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ xxx_hidden_MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ xxx_hidden_UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ xxx_hidden_MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+ xxx_hidden_MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+ xxx_hidden_Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+ xxx_hidden_Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+ xxx_hidden_Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+ xxx_hidden_Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+ xxx_hidden_Enum []string `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+ xxx_hidden_FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *JSONSchema) Reset() {
+ *x = JSONSchema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema) ProtoMessage() {}
+
+func (x *JSONSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *JSONSchema) GetRef() string {
+ if x != nil {
+ return x.xxx_hidden_Ref
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetTitle() string {
+ if x != nil {
+ return x.xxx_hidden_Title
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetDefault() string {
+ if x != nil {
+ return x.xxx_hidden_Default
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.xxx_hidden_ReadOnly
+ }
+ return false
+}
+
+func (x *JSONSchema) GetExample() string {
+ if x != nil {
+ return x.xxx_hidden_Example
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetMultipleOf() float64 {
+ if x != nil {
+ return x.xxx_hidden_MultipleOf
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMaximum() float64 {
+ if x != nil {
+ return x.xxx_hidden_Maximum
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.xxx_hidden_ExclusiveMaximum
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMinimum() float64 {
+ if x != nil {
+ return x.xxx_hidden_Minimum
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.xxx_hidden_ExclusiveMinimum
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMaxLength() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MaxLength
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinLength() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MinLength
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetPattern() string {
+ if x != nil {
+ return x.xxx_hidden_Pattern
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetMaxItems() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MaxItems
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinItems() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MinItems
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetUniqueItems() bool {
+ if x != nil {
+ return x.xxx_hidden_UniqueItems
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMaxProperties() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MaxProperties
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinProperties() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MinProperties
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetRequired() []string {
+ if x != nil {
+ return x.xxx_hidden_Required
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetArray() []string {
+ if x != nil {
+ return x.xxx_hidden_Array
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes {
+ if x != nil {
+ return x.xxx_hidden_Type
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetFormat() string {
+ if x != nil {
+ return x.xxx_hidden_Format
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetEnum() []string {
+ if x != nil {
+ return x.xxx_hidden_Enum
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration {
+ if x != nil {
+ return x.xxx_hidden_FieldConfiguration
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *JSONSchema) SetRef(v string) {
+ x.xxx_hidden_Ref = v
+}
+
+func (x *JSONSchema) SetTitle(v string) {
+ x.xxx_hidden_Title = v
+}
+
+func (x *JSONSchema) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *JSONSchema) SetDefault(v string) {
+ x.xxx_hidden_Default = v
+}
+
+func (x *JSONSchema) SetReadOnly(v bool) {
+ x.xxx_hidden_ReadOnly = v
+}
+
+func (x *JSONSchema) SetExample(v string) {
+ x.xxx_hidden_Example = v
+}
+
+func (x *JSONSchema) SetMultipleOf(v float64) {
+ x.xxx_hidden_MultipleOf = v
+}
+
+func (x *JSONSchema) SetMaximum(v float64) {
+ x.xxx_hidden_Maximum = v
+}
+
+func (x *JSONSchema) SetExclusiveMaximum(v bool) {
+ x.xxx_hidden_ExclusiveMaximum = v
+}
+
+func (x *JSONSchema) SetMinimum(v float64) {
+ x.xxx_hidden_Minimum = v
+}
+
+func (x *JSONSchema) SetExclusiveMinimum(v bool) {
+ x.xxx_hidden_ExclusiveMinimum = v
+}
+
+func (x *JSONSchema) SetMaxLength(v uint64) {
+ x.xxx_hidden_MaxLength = v
+}
+
+func (x *JSONSchema) SetMinLength(v uint64) {
+ x.xxx_hidden_MinLength = v
+}
+
+func (x *JSONSchema) SetPattern(v string) {
+ x.xxx_hidden_Pattern = v
+}
+
+func (x *JSONSchema) SetMaxItems(v uint64) {
+ x.xxx_hidden_MaxItems = v
+}
+
+func (x *JSONSchema) SetMinItems(v uint64) {
+ x.xxx_hidden_MinItems = v
+}
+
+func (x *JSONSchema) SetUniqueItems(v bool) {
+ x.xxx_hidden_UniqueItems = v
+}
+
+func (x *JSONSchema) SetMaxProperties(v uint64) {
+ x.xxx_hidden_MaxProperties = v
+}
+
+func (x *JSONSchema) SetMinProperties(v uint64) {
+ x.xxx_hidden_MinProperties = v
+}
+
+func (x *JSONSchema) SetRequired(v []string) {
+ x.xxx_hidden_Required = v
+}
+
+func (x *JSONSchema) SetArray(v []string) {
+ x.xxx_hidden_Array = v
+}
+
+func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) {
+ x.xxx_hidden_Type = v
+}
+
+func (x *JSONSchema) SetFormat(v string) {
+ x.xxx_hidden_Format = v
+}
+
+func (x *JSONSchema) SetEnum(v []string) {
+ x.xxx_hidden_Enum = v
+}
+
+func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) {
+ x.xxx_hidden_FieldConfiguration = v
+}
+
+func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *JSONSchema) HasFieldConfiguration() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_FieldConfiguration != nil
+}
+
+func (x *JSONSchema) ClearFieldConfiguration() {
+ x.xxx_hidden_FieldConfiguration = nil
+}
+
+type JSONSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the proto file. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // The title of the schema.
+ Title string
+ // A short description of the schema.
+ Description string
+ Default string
+ ReadOnly bool
+ // A free-form property to include a JSON example of this field. This is copied
+ // verbatim to the output swagger.json. Quotes must be escaped.
+ // This property is the same for 2.0 and 3.0.0:
+ // https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject
+ // https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+ Example string
+ MultipleOf float64
+ // Maximum represents an inclusive upper limit for a numeric instance. The
+ // value MUST be a number.
+ Maximum float64
+ ExclusiveMaximum bool
+ // Minimum represents an inclusive lower limit for a numeric instance. The
+ // value MUST be a number.
+ Minimum float64
+ ExclusiveMinimum bool
+ MaxLength uint64
+ MinLength uint64
+ Pattern string
+ MaxItems uint64
+ MinItems uint64
+ UniqueItems bool
+ MaxProperties uint64
+ MinProperties uint64
+ Required []string
+ // Items in `array` must be unique.
+ Array []string
+ Type []JSONSchema_JSONSchemaSimpleTypes
+ // `Format` further refines the data type of the schema, e.g. "int64" or "date-time".
+ Format string
+ // Items in `enum` must be unique.
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+ Enum []string
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration *JSONSchema_FieldConfiguration
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+ m0 := &JSONSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Ref = b.Ref
+ x.xxx_hidden_Title = b.Title
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Default = b.Default
+ x.xxx_hidden_ReadOnly = b.ReadOnly
+ x.xxx_hidden_Example = b.Example
+ x.xxx_hidden_MultipleOf = b.MultipleOf
+ x.xxx_hidden_Maximum = b.Maximum
+ x.xxx_hidden_ExclusiveMaximum = b.ExclusiveMaximum
+ x.xxx_hidden_Minimum = b.Minimum
+ x.xxx_hidden_ExclusiveMinimum = b.ExclusiveMinimum
+ x.xxx_hidden_MaxLength = b.MaxLength
+ x.xxx_hidden_MinLength = b.MinLength
+ x.xxx_hidden_Pattern = b.Pattern
+ x.xxx_hidden_MaxItems = b.MaxItems
+ x.xxx_hidden_MinItems = b.MinItems
+ x.xxx_hidden_UniqueItems = b.UniqueItems
+ x.xxx_hidden_MaxProperties = b.MaxProperties
+ x.xxx_hidden_MinProperties = b.MinProperties
+ x.xxx_hidden_Required = b.Required
+ x.xxx_hidden_Array = b.Array
+ x.xxx_hidden_Type = b.Type
+ x.xxx_hidden_Format = b.Format
+ x.xxx_hidden_Enum = b.Enum
+ x.xxx_hidden_FieldConfiguration = b.FieldConfiguration
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
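+
+// A sketch of a JSONSchema carrying validation keywords for a string field.
+// JSONSchema_STRING is the simple-type enum constant generated earlier in
+// this file; the constraints themselves are illustrative.
+//
+//	js := JSONSchema_builder{
+//		Type:      []JSONSchema_JSONSchemaSimpleTypes{JSONSchema_STRING},
+//		MinLength: 1,
+//		MaxLength: 64,
+//		Pattern:   "^[a-z][a-z0-9-]*$",
+//	}.Build()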
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+type Tag struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Tag) Reset() {
+ *x = Tag{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Tag) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tag) ProtoMessage() {}
+
+func (x *Tag) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Tag) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *Tag) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *Tag) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Tag) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *Tag) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Tag) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Tag) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Tag) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Tag) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+type Tag_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The name of the tag. Use it to override the name of a global Tag object,
+ // and then use that name to reference the tag throughout the OpenAPI file.
+ Name string
+ // A short description for the tag. GFM syntax can be used for rich text
+ // representation.
+ Description string
+ // Additional external documentation for this tag.
+ ExternalDocs *ExternalDocumentation
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Tag_builder) Build() *Tag {
+ m0 := &Tag{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+type SecurityDefinitions struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityDefinitions) Reset() {
+ *x = SecurityDefinitions{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityDefinitions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme {
+ if x != nil {
+ return x.xxx_hidden_Security
+ }
+ return nil
+}
+
+func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) {
+ x.xxx_hidden_Security = v
+}
+
+type SecurityDefinitions_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A single security scheme definition, mapping a "name" to the scheme it
+ // defines.
+ Security map[string]*SecurityScheme
+}
+
+func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions {
+ m0 := &SecurityDefinitions{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Security = b.Security
+ return m0
+}
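+
+// A sketch: registering a named apiKey scheme. The map key and header name
+// are illustrative; the enum constants are generated earlier in this file.
+//
+//	defs := SecurityDefinitions_builder{
+//		Security: map[string]*SecurityScheme{
+//			"ApiKeyAuth": SecurityScheme_builder{
+//				Type: SecurityScheme_TYPE_API_KEY,
+//				In:   SecurityScheme_IN_HEADER,
+//				Name: "X-API-Key",
+//			}.Build(),
+//		},
+//	}.Build()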
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+type SecurityScheme struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"`
+ xxx_hidden_Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"`
+ xxx_hidden_AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+ xxx_hidden_TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+ xxx_hidden_Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityScheme) Reset() {
+ *x = SecurityScheme{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityScheme) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityScheme) ProtoMessage() {}
+
+func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityScheme) GetType() SecurityScheme_Type {
+ if x != nil {
+ return x.xxx_hidden_Type
+ }
+ return SecurityScheme_TYPE_INVALID
+}
+
+func (x *SecurityScheme) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetIn() SecurityScheme_In {
+ if x != nil {
+ return x.xxx_hidden_In
+ }
+ return SecurityScheme_IN_INVALID
+}
+
+func (x *SecurityScheme) GetFlow() SecurityScheme_Flow {
+ if x != nil {
+ return x.xxx_hidden_Flow
+ }
+ return SecurityScheme_FLOW_INVALID
+}
+
+func (x *SecurityScheme) GetAuthorizationUrl() string {
+ if x != nil {
+ return x.xxx_hidden_AuthorizationUrl
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetTokenUrl() string {
+ if x != nil {
+ return x.xxx_hidden_TokenUrl
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetScopes() *Scopes {
+ if x != nil {
+ return x.xxx_hidden_Scopes
+ }
+ return nil
+}
+
+func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *SecurityScheme) SetType(v SecurityScheme_Type) {
+ x.xxx_hidden_Type = v
+}
+
+func (x *SecurityScheme) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *SecurityScheme) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *SecurityScheme) SetIn(v SecurityScheme_In) {
+ x.xxx_hidden_In = v
+}
+
+func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) {
+ x.xxx_hidden_Flow = v
+}
+
+func (x *SecurityScheme) SetAuthorizationUrl(v string) {
+ x.xxx_hidden_AuthorizationUrl = v
+}
+
+func (x *SecurityScheme) SetTokenUrl(v string) {
+ x.xxx_hidden_TokenUrl = v
+}
+
+func (x *SecurityScheme) SetScopes(v *Scopes) {
+ x.xxx_hidden_Scopes = v
+}
+
+func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *SecurityScheme) HasScopes() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Scopes != nil
+}
+
+func (x *SecurityScheme) ClearScopes() {
+ x.xxx_hidden_Scopes = nil
+}
+
+type SecurityScheme_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ Type SecurityScheme_Type
+ // A short description for the security scheme.
+ Description string
+ // The name of the header or query parameter to be used.
+ // Valid for apiKey.
+ Name string
+ // The location of the API key. Valid values are "query" or
+ // "header".
+ // Valid for apiKey.
+ In SecurityScheme_In
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ // Valid for oauth2.
+ Flow SecurityScheme_Flow
+ // The authorization URL to be used for this flow. This SHOULD be in
+ // the form of a URL.
+ // Valid for oauth2/implicit and oauth2/accessCode.
+ AuthorizationUrl string
+ // The token URL to be used for this flow. This SHOULD be in the
+ // form of a URL.
+ // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+ TokenUrl string
+ // The available scopes for the OAuth2 security scheme.
+ // Valid for oauth2.
+ Scopes *Scopes
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 SecurityScheme_builder) Build() *SecurityScheme {
+ m0 := &SecurityScheme{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Type = b.Type
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_In = b.In
+ x.xxx_hidden_Flow = b.Flow
+ x.xxx_hidden_AuthorizationUrl = b.AuthorizationUrl
+ x.xxx_hidden_TokenUrl = b.TokenUrl
+ x.xxx_hidden_Scopes = b.Scopes
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
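+
+// A sketch of an OAuth2 accessCode flow scheme; the URLs and scope are
+// illustrative, and the enum constants are generated earlier in this file.
+//
+//	scheme := SecurityScheme_builder{
+//		Type:             SecurityScheme_TYPE_OAUTH2,
+//		Flow:             SecurityScheme_FLOW_ACCESS_CODE,
+//		AuthorizationUrl: "https://example.com/oauth/authorize",
+//		TokenUrl:         "https://example.com/oauth/token",
+//		Scopes: Scopes_builder{
+//			Scope: map[string]string{"read": "Grants read access"},
+//		}.Build(),
+//	}.Build()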
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+type SecurityRequirement struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement) Reset() {
+ *x = SecurityRequirement{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+ if x != nil {
+ return x.xxx_hidden_SecurityRequirement
+ }
+ return nil
+}
+
+func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) {
+ x.xxx_hidden_SecurityRequirement = v
+}
+
+type SecurityRequirement_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Each name must correspond to a security scheme which is declared in
+ // the Security Definitions. If the security scheme is of type "oauth2",
+ // then the value is a list of scope names required for the execution.
+ // For other security scheme types, the array MUST be empty.
+ SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue
+}
+
+func (b0 SecurityRequirement_builder) Build() *SecurityRequirement {
+ m0 := &SecurityRequirement{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_SecurityRequirement = b.SecurityRequirement
+ return m0
+}
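+
+// A sketch: requiring an (illustrative) "OAuth2" scheme with one scope; for
+// non-oauth2 schemes the Scope slice must stay empty.
+//
+//	req := SecurityRequirement_builder{
+//		SecurityRequirement: map[string]*SecurityRequirement_SecurityRequirementValue{
+//			"OAuth2": SecurityRequirement_SecurityRequirementValue_builder{
+//				Scope: []string{"read"},
+//			}.Build(),
+//		},
+//	}.Build()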
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Scopes) Reset() {
+ *x = Scopes{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Scopes) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Scopes) ProtoMessage() {}
+
+func (x *Scopes) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Scopes) GetScope() map[string]string {
+ if x != nil {
+ return x.xxx_hidden_Scope
+ }
+ return nil
+}
+
+func (x *Scopes) SetScope(v map[string]string) {
+ x.xxx_hidden_Scope = v
+}
+
+type Scopes_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Maps the name of a scope to a short description of it (as the value of the
+ // property).
+ Scope map[string]string
+}
+
+func (b0 Scopes_builder) Build() *Scopes {
+ m0 := &Scopes{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Scope = b.Scope
+ return m0
+}
+
+// `FieldConfiguration` provides additional field-level properties used when generating the OpenAPI v2 file.
+// These properties are not defined by OpenAPI v2, but they are used to control the generation.
+type JSONSchema_FieldConfiguration struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *JSONSchema_FieldConfiguration) Reset() {
+ *x = JSONSchema_FieldConfiguration{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema_FieldConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema_FieldConfiguration) ProtoMessage() {}
+
+func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *JSONSchema_FieldConfiguration) GetPathParamName() string {
+ if x != nil {
+ return x.xxx_hidden_PathParamName
+ }
+ return ""
+}
+
+func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) {
+ x.xxx_hidden_PathParamName = v
+}
+
+type JSONSchema_FieldConfiguration_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Alternative parameter name when used as path parameter. If set, this will
+ // be used as the complete parameter name when this field is used as a path
+ // parameter. Use this to avoid having auto-generated path parameter names
+ // for overlapping paths.
+ PathParamName string
+}
+
+func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration {
+ m0 := &JSONSchema_FieldConfiguration{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_PathParamName = b.PathParamName
+ return m0
+}
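+
+// Minimal usage sketch of the builder above (the parameter name "book_id"
+// is an assumed example value, not generated output):
+//
+//  fc := JSONSchema_FieldConfiguration_builder{
+//   PathParamName: "book_id",
+//  }.Build()
+//  _ = fc.GetPathParamName() // "book_id"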
+
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) Reset() {
+ *x = SecurityRequirement_SecurityRequirementValue{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+
+func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+ if x != nil {
+ return x.xxx_hidden_Scope
+ }
+ return nil
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) {
+ x.xxx_hidden_Scope = v
+}
+
+type SecurityRequirement_SecurityRequirementValue_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ Scope []string
+}
+
+func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue {
+ m0 := &SecurityRequirement_SecurityRequirementValue{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Scope = b.Scope
+ return m0
+}
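+
+// Minimal usage sketch of the builder above (the scope names are assumed
+// example values; per the message comment, the list should be empty for
+// non-"oauth2" security scheme types):
+//
+//  v := SecurityRequirement_SecurityRequirementValue_builder{
+//   Scope: []string{"read", "write"},
+//  }.Build()
+//  _ = v.GetScope() // ["read" "write"]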
+
+var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{
+ 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67,
+ 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a,
+ 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e,
+ 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70,
+ 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50,
+ 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73,
+ 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+ 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+ 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69,
+ 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73,
+ 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d,
+ 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+ 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07,
+ 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74,
+ 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12,
+ 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f,
+ 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+ 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+ 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a,
+ 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+ 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
+ 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a,
+ 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52,
+ 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
+ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+ 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+ 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a,
+ 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e,
+ 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45,
+ 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
+ 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08,
+ 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a,
+ 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10,
+ 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08,
+ 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a,
+ 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+ 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66,
+ 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72,
+ 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63,
+ 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c,
+ 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12,
+ 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75,
+ 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65,
+ 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73,
+ 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12,
+ 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d,
+ 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+ 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08,
+ 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+ 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a,
+ 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10,
+ 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66,
+ 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7,
+ 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a,
+ 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12,
+ 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18,
+ 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d,
+ 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78,
+ 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69,
+ 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+ 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+ 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+ 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78,
+ 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18,
+ 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+ 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78,
+ 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12,
+ 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09,
+ 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69,
+ 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e,
+ 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e,
+ 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18,
+ 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70,
+ 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+ 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74,
+ 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
+ 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59,
+ 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12,
+ 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
+ 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+ 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+ 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+ 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04,
+ 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e,
+ 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a,
+ 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a,
+ 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61,
+ 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+ 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65,
+ 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+ 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67,
+ 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff,
+ 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f,
+ 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a,
+ 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75,
+ 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65,
+ 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70,
+ 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+ 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c,
+ 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10,
+ 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10,
+ 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e,
+ 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55,
+ 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44,
+ 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c,
+ 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11,
+ 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+ 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f,
+ 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50,
+ 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c,
+ 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04,
+ 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63,
+ 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70,
+ 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07,
+ 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54,
+ 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06,
+ 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42,
+ 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72,
+ 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70,
+ 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{
+ (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+ (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+ (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+ (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+ (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+ (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+ (*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters
+ (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+ (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header
+ (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response
+ (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info
+ (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact
+ (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License
+ (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema
+ (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag
+ (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+ (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+ (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes
+ nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+ nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+ nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+ nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+ nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+ nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+ nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+ nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+ nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+ (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+ nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+ nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+ nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+ nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+ (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+ nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+ nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+ (*structpb.Value)(nil), // 41: google.protobuf.Value
+}
+var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{
+ 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info
+ 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+ 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+ 22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+ 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+ 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+ 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+ 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters
+ 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+ 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+ 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+ 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+ 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+ 30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+ 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact
+ 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License
+ 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+ 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+ 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+ 33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+ 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+ 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+ 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+ 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+ 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+ 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+ 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes
+ 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+ 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+ 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+ 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+ 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+ 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header
+ 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+ 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 38, // 52: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+ 53, // [53:53] is the sub-list for method output_type
+ 53, // [53:53] is the sub-list for method input_type
+ 53, // [53:53] is the sub-list for extension type_name
+ 53, // [53:53] is the sub-list for extension extendee
+ 0, // [0:53] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() }
+func file_protoc_gen_openapiv2_options_openapiv2_proto_init() {
+ if File_protoc_gen_openapiv2_options_openapiv2_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc,
+ NumEnums: 6,
+ NumMessages: 35,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes,
+ DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs,
+ EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes,
+ MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes,
+ }.Build()
+ File_protoc_gen_openapiv2_options_openapiv2_proto = out.File
+ file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil
+ file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil
+ file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
new file mode 100644
index 00000000000..04b4bebf3d5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -0,0 +1,98 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "runtime",
+ srcs = [
+ "context.go",
+ "convert.go",
+ "doc.go",
+ "errors.go",
+ "fieldmask.go",
+ "handler.go",
+ "marshal_httpbodyproto.go",
+ "marshal_json.go",
+ "marshal_jsonpb.go",
+ "marshal_proto.go",
+ "marshaler.go",
+ "marshaler_registry.go",
+ "mux.go",
+ "pattern.go",
+ "proto2_convert.go",
+ "query.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
+ deps = [
+ "//internal/httprule",
+ "//utilities",
+ "@org_golang_google_genproto_googleapis_api//httpbody",
+ "@org_golang_google_grpc//:grpc",
+ "@org_golang_google_grpc//codes",
+ "@org_golang_google_grpc//grpclog",
+ "@org_golang_google_grpc//health/grpc_health_v1",
+ "@org_golang_google_grpc//metadata",
+ "@org_golang_google_grpc//status",
+ "@org_golang_google_protobuf//encoding/protojson",
+ "@org_golang_google_protobuf//proto",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//reflect/protoregistry",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/fieldmaskpb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ "@org_golang_google_protobuf//types/known/wrapperspb",
+ ],
+)
+
+go_test(
+ name = "runtime_test",
+ size = "small",
+ srcs = [
+ "context_test.go",
+ "convert_test.go",
+ "errors_test.go",
+ "fieldmask_test.go",
+ "handler_test.go",
+ "marshal_httpbodyproto_test.go",
+ "marshal_json_test.go",
+ "marshal_jsonpb_test.go",
+ "marshal_proto_test.go",
+ "marshaler_registry_test.go",
+ "mux_internal_test.go",
+ "mux_test.go",
+ "pattern_test.go",
+ "query_fuzz_test.go",
+ "query_test.go",
+ ],
+ embed = [":runtime"],
+ deps = [
+ "//runtime/internal/examplepb",
+ "//utilities",
+ "@com_github_google_go_cmp//cmp",
+ "@com_github_google_go_cmp//cmp/cmpopts",
+ "@org_golang_google_genproto_googleapis_api//httpbody",
+ "@org_golang_google_genproto_googleapis_rpc//errdetails",
+ "@org_golang_google_genproto_googleapis_rpc//status",
+ "@org_golang_google_grpc//:grpc",
+ "@org_golang_google_grpc//codes",
+ "@org_golang_google_grpc//health/grpc_health_v1",
+ "@org_golang_google_grpc//metadata",
+ "@org_golang_google_grpc//status",
+ "@org_golang_google_protobuf//encoding/protojson",
+ "@org_golang_google_protobuf//proto",
+ "@org_golang_google_protobuf//testing/protocmp",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/fieldmaskpb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ "@org_golang_google_protobuf//types/known/wrapperspb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":runtime",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
new file mode 100644
index 00000000000..00b2228a1de
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -0,0 +1,417 @@
+package runtime
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net"
+ "net/http"
+ "net/textproto"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// MetadataHeaderPrefix is the HTTP header prefix that represents custom metadata
+// parameters to or from a gRPC call.
+const MetadataHeaderPrefix = "Grpc-Metadata-"
+
+// MetadataPrefix is prepended to permanent HTTP header keys (as specified
+// by the IANA) when added to the gRPC context.
+const MetadataPrefix = "grpcgateway-"
+
+// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
+// HTTP headers in a response handled by grpc-gateway.
+const MetadataTrailerPrefix = "Grpc-Trailer-"
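+
+// Illustrative mapping under the default matchers (header and key names are
+// examples, not part of the API): an inbound HTTP header "Grpc-Metadata-Foo"
+// reaches the gRPC handler as metadata key "foo"; a permanent header such as
+// "Cookie" arrives prefixed as "grpcgateway-cookie"; and a gRPC trailer "bar"
+// is written back to the HTTP response as "Grpc-Trailer-Bar".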
+
+const metadataGrpcTimeout = "Grpc-Timeout"
+const metadataHeaderBinarySuffix = "-Bin"
+
+const xForwardedFor = "X-Forwarded-For"
+const xForwardedHost = "X-Forwarded-Host"
+
+// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+// header isn't present. If the value is 0, the sent `context` will not have a timeout.
+var DefaultContextTimeout = 0 * time.Second
+
+// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
+// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
+var malformedHTTPHeaders = map[string]struct{}{
+ "connection": {},
+}
+
+type (
+ rpcMethodKey struct{}
+ httpPathPatternKey struct{}
+ httpPatternKey struct{}
+
+ AnnotateContextOption func(ctx context.Context) context.Context
+)
+
+func WithHTTPPathPattern(pattern string) AnnotateContextOption {
+ return func(ctx context.Context) context.Context {
+ return withHTTPPathPattern(ctx, pattern)
+ }
+}
+
+func decodeBinHeader(v string) ([]byte, error) {
+ if len(v)%4 == 0 {
+ // Input was padded, or padding was not necessary.
+ return base64.StdEncoding.DecodeString(v)
+ }
+ return base64.RawStdEncoding.DecodeString(v)
+}
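+
+// For example: the padded value "aGVsbG8=" has length divisible by four and is
+// decoded with StdEncoding, while the unpadded "aGVsbG8" falls through to
+// RawStdEncoding; both decode to "hello".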
+
+/*
+AnnotateContext adds context information such as metadata from the request.
+
+At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
+except that the forwarded destination is not another HTTP service but rather
+a gRPC service.
+*/
+func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+ ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+ if err != nil {
+ return nil, err
+ }
+ if md == nil {
+ return ctx, nil
+ }
+
+ return metadata.NewOutgoingContext(ctx, md), nil
+}
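+
+// Caller-side sketch (identifiers assumed in scope; this mirrors what the
+// generated gateway stubs do):
+//
+//	ctx, err := runtime.AnnotateContext(r.Context(), mux, r, "/pkg.Service/Method")
+//	if err != nil {
+//		runtime.HTTPError(ctx, mux, marshaler, w, r, err)
+//		return
+//	}
+//	// ctx now carries outgoing gRPC metadata derived from the HTTP request.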
+
+// AnnotateIncomingContext adds context information such as metadata from the request.
+// Attach metadata as incoming context.
+func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+ ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+ if err != nil {
+ return nil, err
+ }
+ if md == nil {
+ return ctx, nil
+ }
+
+ return metadata.NewIncomingContext(ctx, md), nil
+}
+
+func isValidGRPCMetadataKey(key string) bool {
+ // Must be a valid gRPC "Header-Name" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means 0-9 a-z _ - .
+ // Only lowercase letters are valid in the wire protocol, but the client library will normalize
+ // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable.
+ bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ validLowercaseLetter := ch >= 'a' && ch <= 'z'
+ validUppercaseLetter := ch >= 'A' && ch <= 'Z'
+ validDigit := ch >= '0' && ch <= '9'
+ validOther := ch == '.' || ch == '-' || ch == '_'
+ if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther {
+ return false
+ }
+ }
+ return true
+}
+
+func isValidGRPCMetadataTextValue(textValue string) bool {
+ // Must be a valid gRPC "ASCII-Value" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means printable ASCII (including spaces): 0x20 to 0x7E inclusive.
+ bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ if ch < 0x20 || ch > 0x7E {
+ return false
+ }
+ }
+ return true
+}
+
+func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
+ ctx = withRPCMethod(ctx, rpcMethodName)
+ for _, o := range options {
+ ctx = o(ctx)
+ }
+ timeout := DefaultContextTimeout
+ if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
+ var err error
+ timeout, err = timeoutDecode(tm)
+ if err != nil {
+ return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
+ }
+ }
+ var pairs []string
+ for key, vals := range req.Header {
+ key = textproto.CanonicalMIMEHeaderKey(key)
+ switch key {
+ case xForwardedFor, xForwardedHost:
+ // Handled separately below
+ continue
+ }
+
+ for _, val := range vals {
+ // For backwards-compatibility, pass through 'authorization' header with no prefix.
+ if key == "Authorization" {
+ pairs = append(pairs, "authorization", val)
+ }
+ if h, ok := mux.incomingHeaderMatcher(key); ok {
+ if !isValidGRPCMetadataKey(h) {
+ grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h)
+ continue
+ }
+ // Handles "-bin" metadata in grpc, since grpc will do another base64
+ // encode before sending to server, we need to decode it first.
+ if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+ b, err := decodeBinHeader(val)
+ if err != nil {
+ return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+ }
+
+ val = string(b)
+ } else if !isValidGRPCMetadataTextValue(val) {
+ grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h)
+ continue
+ }
+ pairs = append(pairs, h, val)
+ }
+ }
+ }
+ if host := req.Header.Get(xForwardedHost); host != "" {
+ pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+ } else if req.Host != "" {
+ pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+ }
+
+ xff := req.Header.Values(xForwardedFor)
+ if addr := req.RemoteAddr; addr != "" {
+ if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+ xff = append(xff, remoteIP)
+ }
+ }
+ if len(xff) > 0 {
+ pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", "))
+ }
+
+ if timeout != 0 {
+ ctx, _ = context.WithTimeout(ctx, timeout)
+ }
+ md := metadata.Pairs(pairs...)
+ for _, mda := range mux.metadataAnnotators {
+ md = metadata.Join(md, mda(ctx, req))
+ }
+ if len(md) == 0 {
+ return ctx, nil, nil
+ }
+ return ctx, md, nil
+}
+
+// ServerMetadata consists of metadata sent from the gRPC server.
+type ServerMetadata struct {
+ HeaderMD metadata.MD
+ TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+ if ctx == nil {
+ return md, false
+ }
+ md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+ return
+}
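+
+// Caller-side sketch (header name illustrative): a response-forwarding hook
+// can read server-sent headers via
+//
+//	if md, ok := runtime.ServerMetadataFromContext(ctx); ok {
+//		requestID := md.HeaderMD.Get("x-request-id")
+//		_ = requestID
+//	}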
+
+// ServerTransportStream implements grpc.ServerTransportStream.
+// It should only be used by the generated files to support grpc.SendHeader
+// outside of gRPC server use.
+type ServerTransportStream struct {
+ mu sync.Mutex
+ header metadata.MD
+ trailer metadata.MD
+}
+
+// Method returns the method for the stream.
+func (s *ServerTransportStream) Method() string {
+ return ""
+}
+
+// Header returns the header metadata of the stream.
+func (s *ServerTransportStream) Header() metadata.MD {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.header.Copy()
+}
+
+// SetHeader sets the header metadata.
+func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+
+ s.mu.Lock()
+ s.header = metadata.Join(s.header, md)
+ s.mu.Unlock()
+ return nil
+}
+
+// SendHeader sets the header metadata.
+func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
+ return s.SetHeader(md)
+}
+
+// Trailer returns the cached trailer metadata.
+func (s *ServerTransportStream) Trailer() metadata.MD {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.trailer.Copy()
+}
+
+// SetTrailer sets the trailer metadata.
+func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+
+ s.mu.Lock()
+ s.trailer = metadata.Join(s.trailer, md)
+ s.mu.Unlock()
+ return nil
+}
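+
+// Usage sketch (this mirrors how generated code captures headers and trailers
+// set by the gRPC handler):
+//
+//	stream := &runtime.ServerTransportStream{}
+//	ctx = grpc.NewContextWithServerTransportStream(ctx, stream)
+//	// After the call completes, stream.Header() and stream.Trailer() return
+//	// the metadata the handler set.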
+
+func timeoutDecode(s string) (time.Duration, error) {
+ size := len(s)
+ if size < 2 {
+ return 0, fmt.Errorf("timeout string is too short: %q", s)
+ }
+ d, ok := timeoutUnitToDuration(s[size-1])
+ if !ok {
+ return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+ }
+ t, err := strconv.ParseInt(s[:size-1], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return d * time.Duration(t), nil
+}
+
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+ switch u {
+ case 'H':
+ return time.Hour, true
+ case 'M':
+ return time.Minute, true
+ case 'S':
+ return time.Second, true
+ case 'm':
+ return time.Millisecond, true
+ case 'u':
+ return time.Microsecond, true
+ case 'n':
+ return time.Nanosecond, true
+ default:
+ return
+ }
+}
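+
+// For example, per the gRPC wire format: timeoutDecode("100m") yields
+// 100*time.Millisecond, timeoutDecode("2S") yields 2*time.Second, and "5x"
+// is rejected because 'x' is not a recognized unit.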
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+ switch hdr {
+ case
+ "Accept",
+ "Accept-Charset",
+ "Accept-Language",
+ "Accept-Ranges",
+ "Authorization",
+ "Cache-Control",
+ "Content-Type",
+ "Cookie",
+ "Date",
+ "Expect",
+ "From",
+ "Host",
+ "If-Match",
+ "If-Modified-Since",
+ "If-None-Match",
+ "If-Schedule-Tag-Match",
+ "If-Unmodified-Since",
+ "Max-Forwards",
+ "Origin",
+ "Pragma",
+ "Referer",
+ "User-Agent",
+ "Via",
+ "Warning":
+ return true
+ }
+ return false
+}
+
+// isMalformedHTTPHeader checks whether header belongs to the list of
+// "malformed headers" and would be rejected by the gRPC server.
+func isMalformedHTTPHeader(header string) bool {
+ _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
+ return isMalformed
+}
+
+// RPCMethod returns the method string for the server context. The returned
+// string is in the format of "/package.service/method".
+func RPCMethod(ctx context.Context) (string, bool) {
+ m := ctx.Value(rpcMethodKey{})
+ if m == nil {
+ return "", false
+ }
+ ms, ok := m.(string)
+ if !ok {
+ return "", false
+ }
+ return ms, true
+}
+
+func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
+ return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
+}
+
+// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
+// The format of the returned string is defined by the google.api.http path template type.
+func HTTPPathPattern(ctx context.Context) (string, bool) {
+ m := ctx.Value(httpPathPatternKey{})
+ if m == nil {
+ return "", false
+ }
+ ms, ok := m.(string)
+ if !ok {
+ return "", false
+ }
+ return ms, true
+}
+
+func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
+ return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
+}
+
+// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists.
+func HTTPPattern(ctx context.Context) (Pattern, bool) {
+ v, ok := ctx.Value(httpPatternKey{}).(Pattern)
+ return v, ok
+}
+
+func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context {
+ return context.WithValue(ctx, httpPatternKey{}, httpPattern)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
new file mode 100644
index 00000000000..2e50082ad11
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
@@ -0,0 +1,318 @@
+package runtime
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/types/known/durationpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String just returns the given string.
+// It exists only for consistency with the other conversion functions.
+func String(val string) (string, error) {
+ return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+ return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+ s := strings.Split(val, sep)
+ values := make([]bool, len(s))
+ for i, v := range s {
+ value, err := Bool(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
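+
+// For example, BoolSlice("true,false", ",") yields []bool{true, false}. The
+// remaining *Slice helpers in this file follow the same split-then-convert
+// pattern.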
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+ s := strings.Split(val, sep)
+ values := make([]float64, len(s))
+ for i, v := range s {
+ value, err := Float64(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+ s := strings.Split(val, sep)
+ values := make([]float32, len(s))
+ for i, v := range s {
+ value, err := Float32(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+ s := strings.Split(val, sep)
+ values := make([]int64, len(s))
+ for i, v := range s {
+ value, err := Int64(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+ i, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+ s := strings.Split(val, sep)
+ values := make([]int32, len(s))
+ for i, v := range s {
+ value, err := Int32(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+ return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+ s := strings.Split(val, sep)
+ values := make([]uint64, len(s))
+ for i, v := range s {
+ value, err := Uint64(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+ i, err := strconv.ParseUint(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+ s := strings.Split(val, sep)
+ values := make([]uint32, len(s))
+ for i, v := range s {
+ value, err := Uint32(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// A byte sequence may be encoded in either standard or URL-safe base64.
+func Bytes(val string) ([]byte, error) {
+ b, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ b, err = base64.URLEncoding.DecodeString(val)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return b, nil
+}
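+
+// For example, Bytes("aGVsbG8=") yields []byte("hello"); an input using the
+// URL-safe alphabet ('-' and '_') fails the first decode and is handled by
+// the URLEncoding fallback.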
+
+// BytesSlice converts 'val' where individual byte sequences, encoded in standard
+// or URL-safe base64, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+ s := strings.Split(val, sep)
+ values := make([][]byte, len(s))
+ for i, v := range s {
+ value, err := Bytes(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamppb.Timestamp.
+func Timestamp(val string) (*timestamppb.Timestamp, error) {
+ var r timestamppb.Timestamp
+ val = strconv.Quote(strings.Trim(val, `"`))
+ unmarshaler := &protojson.UnmarshalOptions{}
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+// Duration converts the given string into a durationpb.Duration.
+func Duration(val string) (*durationpb.Duration, error) {
+ var r durationpb.Duration
+ val = strconv.Quote(strings.Trim(val, `"`))
+ unmarshaler := &protojson.UnmarshalOptions{}
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
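+
+// For example, Timestamp("2021-01-01T00:00:00Z") and Duration("1.5s") both
+// succeed: each value is re-quoted so the protojson unmarshaler sees the JSON
+// string form in which these well-known types are encoded.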
+
+// Enum converts the given string into an int32 that should be type cast to the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+ e, ok := enumValMap[val]
+ if ok {
+ return e, nil
+ }
+
+ i, err := Int32(val)
+ if err != nil {
+ return 0, fmt.Errorf("%s is not valid", val)
+ }
+ for _, v := range enumValMap {
+ if v == i {
+ return i, nil
+ }
+ }
+ return 0, fmt.Errorf("%s is not valid", val)
+}
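+
+// For example, with m := map[string]int32{"FOO": 1, "BAR": 2}, both
+// Enum("BAR", m) and Enum("2", m) return 2, while Enum("BAZ", m) fails because
+// neither the name nor a known numeric value matches.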
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type cast to the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+ s := strings.Split(val, sep)
+ values := make([]int32, len(s))
+ for i, v := range s {
+ value, err := Enum(v, enumValMap)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Support for google.protobuf.wrappers on top of primitive types
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrapperspb.StringValue, error) {
+ return wrapperspb.String(val), nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrapperspb.FloatValue, error) {
+ parsedVal, err := Float32(val)
+ return wrapperspb.Float(parsedVal), err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
+ parsedVal, err := Float64(val)
+ return wrapperspb.Double(parsedVal), err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrapperspb.BoolValue, error) {
+ parsedVal, err := Bool(val)
+ return wrapperspb.Bool(parsedVal), err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrapperspb.Int32Value, error) {
+ parsedVal, err := Int32(val)
+ return wrapperspb.Int32(parsedVal), err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
+ parsedVal, err := Uint32(val)
+ return wrapperspb.UInt32(parsedVal), err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrapperspb.Int64Value, error) {
+ parsedVal, err := Int64(val)
+ return wrapperspb.Int64(parsedVal), err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
+ parsedVal, err := Uint64(val)
+ return wrapperspb.UInt64(parsedVal), err
+}
+
+// BytesValue well-known type support as wrapper around bytes[] type
+func BytesValue(val string) (*wrapperspb.BytesValue, error) {
+ parsedVal, err := Bytes(val)
+ return wrapperspb.Bytes(parsedVal), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
new file mode 100644
index 00000000000..b6e5ddf7a9f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/
+package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
new file mode 100644
index 00000000000..bbe7decf09b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -0,0 +1,204 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net/http"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// ErrorHandlerFunc is the signature used to configure error handling.
+type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+// StreamErrorHandlerFunc is the signature used to configure stream error handling.
+type StreamErrorHandlerFunc func(context.Context, error) *status.Status
+
+// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
+type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)
+
+// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
+// passed to the DefaultRoutingErrorHandler.
+type HTTPStatusError struct {
+ HTTPStatus int
+ Err error
+}
+
+func (e *HTTPStatusError) Error() string {
+ return e.Err.Error()
+}
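+
+// Usage sketch (error construction assumed): wrapping an error as
+//
+//	&runtime.HTTPStatusError{
+//		HTTPStatus: http.StatusUnprocessableEntity,
+//		Err:        status.Error(codes.InvalidArgument, "bad payload"),
+//	}
+//
+// makes DefaultHTTPErrorHandler reply with 422 while the body still carries
+// the wrapped gRPC status.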
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+ switch code {
+ case codes.OK:
+ return http.StatusOK
+ case codes.Canceled:
+ return 499
+ case codes.Unknown:
+ return http.StatusInternalServerError
+ case codes.InvalidArgument:
+ return http.StatusBadRequest
+ case codes.DeadlineExceeded:
+ return http.StatusGatewayTimeout
+ case codes.NotFound:
+ return http.StatusNotFound
+ case codes.AlreadyExists:
+ return http.StatusConflict
+ case codes.PermissionDenied:
+ return http.StatusForbidden
+ case codes.Unauthenticated:
+ return http.StatusUnauthorized
+ case codes.ResourceExhausted:
+ return http.StatusTooManyRequests
+ case codes.FailedPrecondition:
+ // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
+ return http.StatusBadRequest
+ case codes.Aborted:
+ return http.StatusConflict
+ case codes.OutOfRange:
+ return http.StatusBadRequest
+ case codes.Unimplemented:
+ return http.StatusNotImplemented
+ case codes.Internal:
+ return http.StatusInternalServerError
+ case codes.Unavailable:
+ return http.StatusServiceUnavailable
+ case codes.DataLoss:
+ return http.StatusInternalServerError
+ default:
+ grpclog.Warningf("Unknown gRPC error code: %v", code)
+ return http.StatusInternalServerError
+ }
+}
+
+// HTTPError uses the mux-configured error handler.
+func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+ mux.errorHandler(ctx, mux, marshaler, w, r, err)
+}
+
+// HTTPStreamError uses the mux-configured stream error handler to report an error to the client without closing the connection.
+func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+ st := mux.streamErrorHandler(ctx, err)
+ msg := errorChunk(st)
+ buf, err := marshaler.Marshal(msg)
+ if err != nil {
+ grpclog.Errorf("Failed to marshal an error: %v", err)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to notify error to client: %v", err)
+ return
+ }
+}
+
+// DefaultHTTPErrorHandler is the default error handler.
+// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
+// If "err" is an HTTPStatusError, the function replies with the status code provided by that struct. This is
+// intended to allow passing through specific statuses via the function set with WithRoutingErrorHandler
+// on the ServeMux constructor, for edge cases where the standard mappings in HTTPStatusFromCode
+// are insufficient.
+// Otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body written by this function is a Status message marshaled by the Marshaler.
+func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+ // return Internal when Marshal failed
+ const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+ const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}`
+
+ var customStatus *HTTPStatusError
+ if errors.As(err, &customStatus) {
+ err = customStatus.Err
+ }
+
+ s := status.Convert(err)
+
+ w.Header().Del("Trailer")
+ w.Header().Del("Transfer-Encoding")
+
+ respRw, err := mux.forwardResponseRewriter(ctx, s.Proto())
+ if err != nil {
+ grpclog.Errorf("Failed to rewrite error message %q: %v", s, err)
+ w.WriteHeader(http.StatusInternalServerError)
+ if _, err := io.WriteString(w, fallbackRewriter); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+ return
+ }
+
+ contentType := marshaler.ContentType(respRw)
+ w.Header().Set("Content-Type", contentType)
+
+ if s.Code() == codes.Unauthenticated {
+ w.Header().Set("WWW-Authenticate", s.Message())
+ }
+
+ buf, merr := marshaler.Marshal(respRw)
+ if merr != nil {
+ grpclog.Errorf("Failed to marshal error message %q: %v", s, merr)
+ w.WriteHeader(http.StatusInternalServerError)
+ if _, err := io.WriteString(w, fallback); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+ return
+ }
+
+ md, ok := ServerMetadataFromContext(ctx)
+ if ok {
+ handleForwardResponseServerMetadata(w, mux, md)
+
+ // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+ // Unless the request includes a TE header field indicating "trailers"
+ // is acceptable, as described in Section 4.3, a server SHOULD NOT
+ // generate trailer fields that it believes are necessary for the user
+ // agent to receive.
+ doForwardTrailers := requestAcceptsTrailers(r)
+
+ if doForwardTrailers {
+ handleForwardResponseTrailerHeader(w, mux, md)
+ w.Header().Set("Transfer-Encoding", "chunked")
+ }
+ }
+
+ st := HTTPStatusFromCode(s.Code())
+ if customStatus != nil {
+ st = customStatus.HTTPStatus
+ }
+
+ w.WriteHeader(st)
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+
+ if ok && requestAcceptsTrailers(r) {
+ handleForwardResponseTrailer(w, mux, md)
+ }
+}
+
+func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
+ return status.Convert(err)
+}
+
+// DefaultRoutingErrorHandler is our default handler for routing errors.
+// By default, HTTP error codes are mapped to gRPC error codes as follows:
+//
+// NotFound -> grpc.NotFound
+// StatusBadRequest -> grpc.InvalidArgument
+// MethodNotAllowed -> grpc.Unimplemented
+// Other -> grpc.Internal, as the method is not expected to be called for anything else
+func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
+ sterr := status.Error(codes.Internal, "Unexpected routing error")
+ switch httpStatus {
+ case http.StatusBadRequest:
+ sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
+ case http.StatusMethodNotAllowed:
+ sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
+ case http.StatusNotFound:
+ sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
+ }
+ mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
+}
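+
+// A custom routing error handler can be swapped in when constructing the mux
+// (sketch; handler implementation elided):
+//
+//	mux := runtime.NewServeMux(runtime.WithRoutingErrorHandler(myRoutingErrorHandler))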
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
new file mode 100644
index 00000000000..2fcd7af3c40
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -0,0 +1,168 @@
+package runtime
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "sort"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
+ fd := fields.ByName(protoreflect.Name(name))
+ if fd != nil {
+ return fd
+ }
+
+ return fields.ByJSONName(name)
+}
+
+// FieldMaskFromRequestBody creates a FieldMask listing all complete paths from the JSON body.
+func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
+ fm := &field_mask.FieldMask{}
+ var root interface{}
+
+ if err := json.NewDecoder(r).Decode(&root); err != nil {
+ if errors.Is(err, io.EOF) {
+ return fm, nil
+ }
+ return nil, err
+ }
+
+ queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
+ for len(queue) > 0 {
+ // dequeue an item
+ item := queue[0]
+ queue = queue[1:]
+
+ m, ok := item.node.(map[string]interface{})
+ switch {
+ case ok && len(m) > 0:
+ // if the item is an object, then enqueue all of its children
+ for k, v := range m {
+ if item.msg == nil {
+ return nil, errors.New("JSON structure did not match request type")
+ }
+
+ fd := getFieldByName(item.msg.Descriptor().Fields(), k)
+ if fd == nil {
+ return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
+ }
+
+ if isDynamicProtoMessage(fd.Message()) {
+ for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
+ newPath := p
+ if item.path != "" {
+ newPath = item.path + "." + newPath
+ }
+ queue = append(queue, fieldMaskPathItem{path: newPath})
+ }
+ continue
+ }
+
+ if isProtobufAnyMessage(fd.Message()) && !fd.IsList() {
+ _, hasTypeField := v.(map[string]interface{})["@type"]
+ if hasTypeField {
+ queue = append(queue, fieldMaskPathItem{path: k})
+ continue
+ } else {
+ return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
+ }
+
+ }
+
+ child := fieldMaskPathItem{
+ node: v,
+ }
+ if item.path == "" {
+ child.path = string(fd.FullName().Name())
+ } else {
+ child.path = item.path + "." + string(fd.FullName().Name())
+ }
+
+ switch {
+ case fd.IsList(), fd.IsMap():
+ // As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
+ // Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
+ fm.Paths = append(fm.Paths, child.path)
+ case fd.Message() != nil:
+ child.msg = item.msg.Get(fd).Message()
+ fallthrough
+ default:
+ queue = append(queue, child)
+ }
+ }
+ case ok && len(m) == 0:
+ fallthrough
+ case len(item.path) > 0:
+ // otherwise, it's a leaf node so print its path
+ fm.Paths = append(fm.Paths, item.path)
+ }
+ }
+
+ // Sort for deterministic output in the presence
+ // of repeated fields.
+ sort.Strings(fm.Paths)
+
+ return fm, nil
+}
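+
+// Behavior sketch (field names assumed to exist on the request type): a PATCH
+// body of {"name": "x", "address": {"city": "y"}} yields a FieldMask with
+// paths ["address.city", "name"], sorted for deterministic output.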
+
+func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
+ return md != nil && (md.FullName() == "google.protobuf.Any")
+}
+
+func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
+ return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
+}
+
+// buildPathsBlindly does not attempt to match proto field names to the
+// JSON value keys. Instead it relies completely on the structure of
+// the unmarshalled JSON contained within 'in'.
+// It returns a slice containing all subpaths rooted at the passed-in
+// name and JSON value.
+func buildPathsBlindly(name string, in interface{}) []string {
+ m, ok := in.(map[string]interface{})
+ if !ok {
+ return []string{name}
+ }
+
+ var paths []string
+ queue := []fieldMaskPathItem{{path: name, node: m}}
+ for len(queue) > 0 {
+ cur := queue[0]
+ queue = queue[1:]
+
+ m, ok := cur.node.(map[string]interface{})
+ if !ok {
+ // This should never happen since we should always check that we only add
+ // nodes of type map[string]interface{} to the queue.
+ continue
+ }
+ for k, v := range m {
+ if mi, ok := v.(map[string]interface{}); ok {
+ queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
+ } else {
+ // This is not a struct, so there are no more levels to descend.
+ curPath := cur.path + "." + k
+ paths = append(paths, curPath)
+ }
+ }
+ }
+ return paths
+}
+
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
+type fieldMaskPathItem struct {
+ // the list of prior fields leading up to node, joined by dots
+ path string
+
+ // a generic decoded JSON object: the current item to inspect for further path extraction
+ node interface{}
+
+ // parent message
+ msg protoreflect.Message
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
new file mode 100644
index 00000000000..2f0b9e9e0f8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -0,0 +1,251 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/textproto"
+ "strconv"
+ "strings"
+
+ "google.golang.org/genproto/googleapis/api/httpbody"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// ForwardResponseStream forwards the stream from the gRPC server to the REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+ rc := http.NewResponseController(w)
+ md, ok := ServerMetadataFromContext(ctx)
+ if !ok {
+ grpclog.Error("Failed to extract ServerMetadata from context")
+ http.Error(w, "unexpected error", http.StatusInternalServerError)
+ return
+ }
+ handleForwardResponseServerMetadata(w, mux, md)
+
+ w.Header().Set("Transfer-Encoding", "chunked")
+ if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+
+ var delimiter []byte
+ if d, ok := marshaler.(Delimited); ok {
+ delimiter = d.Delimiter()
+ } else {
+ delimiter = []byte("\n")
+ }
+
+ var wroteHeader bool
+ for {
+ resp, err := recv()
+ if errors.Is(err, io.EOF) {
+ return
+ }
+ if err != nil {
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+ if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+
+ respRw, err := mux.forwardResponseRewriter(ctx, resp)
+ if err != nil {
+ grpclog.Errorf("Rewrite error: %v", err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+
+ if !wroteHeader {
+ var contentType string
+ if sct, ok := marshaler.(StreamContentType); ok {
+ contentType = sct.StreamContentType(respRw)
+ } else {
+ contentType = marshaler.ContentType(respRw)
+ }
+ w.Header().Set("Content-Type", contentType)
+ }
+
+ var buf []byte
+ httpBody, isHTTPBody := respRw.(*httpbody.HttpBody)
+ switch {
+ case respRw == nil:
+ buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
+ case isHTTPBody:
+ buf = httpBody.GetData()
+ default:
+ result := map[string]interface{}{"result": respRw}
+ if rb, ok := respRw.(responseBody); ok {
+ result["result"] = rb.XXX_ResponseBody()
+ }
+
+ buf, err = marshaler.Marshal(result)
+ }
+
+ if err != nil {
+ grpclog.Errorf("Failed to marshal response chunk: %v", err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to send response chunk: %v", err)
+ return
+ }
+ wroteHeader = true
+ if _, err := w.Write(delimiter); err != nil {
+ grpclog.Errorf("Failed to send delimiter chunk: %v", err)
+ return
+ }
+ err = rc.Flush()
+ if err != nil {
+ if errors.Is(err, http.ErrNotSupported) {
+ grpclog.Errorf("Flush not supported in %T", w)
+ http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+ return
+ }
+ grpclog.Errorf("Failed to flush response to client: %v", err)
+ return
+ }
+ }
+}
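+
+// Illustrative wire format (default JSON-style marshalers): each streamed
+// message is written as a chunk such as {"result": {...}} followed by the
+// marshaler's delimiter (a newline by default), and stream errors are emitted
+// in-band as {"error": {...}}.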
+
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k, vs := range md.HeaderMD {
+ if h, ok := mux.outgoingHeaderMatcher(k); ok {
+ for _, v := range vs {
+ w.Header().Add(h, v)
+ }
+ }
+ }
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k := range md.TrailerMD {
+ if h, ok := mux.outgoingTrailerMatcher(k); ok {
+ w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h))
+ }
+ }
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k, vs := range md.TrailerMD {
+ if h, ok := mux.outgoingTrailerMatcher(k); ok {
+ for _, v := range vs {
+ w.Header().Add(h, v)
+ }
+ }
+ }
+}
+
+// responseBody is the interface containing the method that returns the field to marshal to the response body.
+// This method is generated on a response struct when `response_body` is set in the `google.api.HttpRule`.
+type responseBody interface {
+ XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from the gRPC server to the REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+ md, ok := ServerMetadataFromContext(ctx)
+ if ok {
+ handleForwardResponseServerMetadata(w, mux, md)
+ }
+
+ // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+ // Unless the request includes a TE header field indicating "trailers"
+ // is acceptable, as described in Section 4.3, a server SHOULD NOT
+ // generate trailer fields that it believes are necessary for the user
+ // agent to receive.
+ doForwardTrailers := requestAcceptsTrailers(req)
+
+ if ok && doForwardTrailers {
+ handleForwardResponseTrailerHeader(w, mux, md)
+ w.Header().Set("Transfer-Encoding", "chunked")
+ }
+
+ contentType := marshaler.ContentType(resp)
+ w.Header().Set("Content-Type", contentType)
+
+ if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+ respRw, err := mux.forwardResponseRewriter(ctx, resp)
+ if err != nil {
+ grpclog.Errorf("Rewrite error: %v", err)
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+ var buf []byte
+ if rb, ok := respRw.(responseBody); ok {
+ buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+ } else {
+ buf, err = marshaler.Marshal(respRw)
+ }
+ if err != nil {
+ grpclog.Errorf("Marshal error: %v", err)
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+
+ if !doForwardTrailers && mux.writeContentLength {
+ w.Header().Set("Content-Length", strconv.Itoa(len(buf)))
+ }
+
+ if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+
+ if ok && doForwardTrailers {
+ handleForwardResponseTrailer(w, mux, md)
+ }
+}
+
+func requestAcceptsTrailers(req *http.Request) bool {
+ te := req.Header.Get("TE")
+ return strings.Contains(strings.ToLower(te), "trailers")
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+ if len(opts) == 0 {
+ return nil
+ }
+ for _, opt := range opts {
+ if err := opt(ctx, w, resp); err != nil {
+ return fmt.Errorf("error handling ForwardResponseOptions: %w", err)
+ }
+ }
+ return nil
+}
+
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
+ st := mux.streamErrorHandler(ctx, err)
+ msg := errorChunk(st)
+ if !wroteHeader {
+ w.Header().Set("Content-Type", marshaler.ContentType(msg))
+ w.WriteHeader(HTTPStatusFromCode(st.Code()))
+ }
+ buf, err := marshaler.Marshal(msg)
+ if err != nil {
+ grpclog.Errorf("Failed to marshal an error: %v", err)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to notify error to client: %v", err)
+ return
+ }
+ if _, err := w.Write(delimiter); err != nil {
+ grpclog.Errorf("Failed to send delimiter chunk: %v", err)
+ return
+ }
+}
+
+func errorChunk(st *status.Status) map[string]proto.Message {
+ return map[string]proto.Message{"error": st.Proto()}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
new file mode 100644
index 00000000000..6de2e220c7f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,32 @@
+package runtime
+
+import (
+ "google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, then this will
+// simply fall back to the Marshaler specified as its default Marshaler.
+type HTTPBodyMarshaler struct {
+ Marshaler
+}
+
+// ContentType returns its specified content type in case v is a
+// google.api.HttpBody message, otherwise it will fall back to the default
+// Marshaler's content type.
+func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
+ if httpBody, ok := v.(*httpbody.HttpBody); ok {
+ return httpBody.GetContentType()
+ }
+ return h.Marshaler.ContentType(v)
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+ if httpBody, ok := v.(*httpbody.HttpBody); ok {
+ return httpBody.GetData(), nil
+ }
+ return h.Marshaler.Marshal(v)
+}
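+
+// A minimal wiring sketch (caller's perspective; the JSONPb fallback is
+// illustrative, and mirrors this package's own default registration):
+//
+//	mux := runtime.NewServeMux(
+//		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
+//			Marshaler: &runtime.JSONPb{},
+//		}),
+//	)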
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
new file mode 100644
index 00000000000..fe52081ab94
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
@@ -0,0 +1,50 @@
+package runtime
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Go.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. maps and oneofs.
+//
+// The NewEncoder and NewDecoder methods return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType(_ interface{}) string {
+ return "application/json"
+}
+
+// Marshal marshals "v" into JSON
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output
+func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ return json.MarshalIndent(v, prefix, indent)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+ return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+ return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+ return []byte("\n")
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
new file mode 100644
index 00000000000..3d07063007d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
@@ -0,0 +1,349 @@
+package runtime
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "google.golang.org/protobuf/encoding/protojson" marshaler.
+// It supports the full functionality of protobuf unlike JSONBuiltin.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb struct {
+ protojson.MarshalOptions
+ protojson.UnmarshalOptions
+}
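+
+// A configuration sketch (the option values are illustrative): the embedded
+// protojson options control the wire shape, e.g. emitting zero-valued fields
+// and tolerating unknown ones.
+//
+//	marshaler := &runtime.JSONPb{
+//		MarshalOptions:   protojson.MarshalOptions{EmitUnpopulated: true},
+//		UnmarshalOptions: protojson.UnmarshalOptions{DiscardUnknown: true},
+//	}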
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType(_ interface{}) string {
+ return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := j.marshalTo(&buf, v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+ p, ok := v.(proto.Message)
+ if !ok {
+ buf, err := j.marshalNonProtoField(v)
+ if err != nil {
+ return err
+ }
+ if j.Indent != "" {
+ b := &bytes.Buffer{}
+ if err := json.Indent(b, buf, "", j.Indent); err != nil {
+ return err
+ }
+ buf = b.Bytes()
+ }
+ _, err = w.Write(buf)
+ return err
+ }
+
+ b, err := j.MarshalOptions.Marshal(p)
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(b)
+ return err
+}
+
+var (
+ // protoMessageType is stored to prevent constant lookup of the same type at runtime.
+ protoMessageType = reflect.TypeFor[proto.Message]()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON,
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+ if v == nil {
+ return []byte("null"), nil
+ }
+ rv := reflect.ValueOf(v)
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return []byte("null"), nil
+ }
+ rv = rv.Elem()
+ }
+
+ if rv.Kind() == reflect.Slice {
+ if rv.IsNil() {
+ if j.EmitUnpopulated {
+ return []byte("[]"), nil
+ }
+ return []byte("null"), nil
+ }
+
+ if rv.Type().Elem().Implements(protoMessageType) {
+ var buf bytes.Buffer
+ if err := buf.WriteByte('['); err != nil {
+ return nil, err
+ }
+ for i := 0; i < rv.Len(); i++ {
+ if i != 0 {
+ if err := buf.WriteByte(','); err != nil {
+ return nil, err
+ }
+ }
+ if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+ return nil, err
+ }
+ }
+ if err := buf.WriteByte(']'); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+ }
+
+ if rv.Type().Elem().Implements(typeProtoEnum) {
+ var buf bytes.Buffer
+ if err := buf.WriteByte('['); err != nil {
+ return nil, err
+ }
+ for i := 0; i < rv.Len(); i++ {
+ if i != 0 {
+ if err := buf.WriteByte(','); err != nil {
+ return nil, err
+ }
+ }
+ var err error
+ if j.UseEnumNumbers {
+ _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
+ } else {
+ _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ if err := buf.WriteByte(']'); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+ }
+ }
+
+ if rv.Kind() == reflect.Map {
+ m := make(map[string]*json.RawMessage)
+ for _, k := range rv.MapKeys() {
+ buf, err := j.Marshal(rv.MapIndex(k).Interface())
+ if err != nil {
+ return nil, err
+ }
+ m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+ }
+ return json.Marshal(m)
+ }
+ if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
+ return json.Marshal(enum.String())
+ }
+ return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v".
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+ return unmarshalJSONPb(data, j.UnmarshalOptions, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+ d := json.NewDecoder(r)
+ return DecoderWrapper{
+ Decoder: d,
+ UnmarshalOptions: j.UnmarshalOptions,
+ }
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+ *json.Decoder
+ protojson.UnmarshalOptions
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using a jsonpb.Unmarshaler.
+func (d DecoderWrapper) Decode(v interface{}) error {
+ return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+ return EncoderFunc(func(v interface{}) error {
+ if err := j.marshalTo(w, v); err != nil {
+ return err
+ }
+ // mimic json.Encoder by adding a newline (makes output
+ // easier to read when it contains multiple encoded items)
+ _, err := w.Write(j.Delimiter())
+ return err
+ })
+}
+
+func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+ d := json.NewDecoder(bytes.NewReader(data))
+ return decodeJSONPb(d, unmarshaler, v)
+}
+
+func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+ p, ok := v.(proto.Message)
+ if !ok {
+ return decodeNonProtoField(d, unmarshaler, v)
+ }
+
+ // Decode into bytes for marshalling
+ var b json.RawMessage
+ if err := d.Decode(&b); err != nil {
+ return err
+ }
+
+ return unmarshaler.Unmarshal([]byte(b), p)
+}
+
+func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", v)
+ }
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ if rv.Type().ConvertibleTo(typeProtoMessage) {
+ // Decode into bytes for marshalling
+ var b json.RawMessage
+ if err := d.Decode(&b); err != nil {
+ return err
+ }
+
+ return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
+ }
+ rv = rv.Elem()
+ }
+ if rv.Kind() == reflect.Map {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ conv, ok := convFromType[rv.Type().Key().Kind()]
+ if !ok {
+ return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+ }
+
+ m := make(map[string]*json.RawMessage)
+ if err := d.Decode(&m); err != nil {
+ return err
+ }
+ for k, v := range m {
+ result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+ if err := result[1].Interface(); err != nil {
+ return err.(error)
+ }
+ bk := result[0]
+ bv := reflect.New(rv.Type().Elem())
+ if v == nil {
+ null := json.RawMessage("null")
+ v = &null
+ }
+ if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
+ return err
+ }
+ rv.SetMapIndex(bk, bv.Elem())
+ }
+ return nil
+ }
+ if rv.Kind() == reflect.Slice {
+ if rv.Type().Elem().Kind() == reflect.Uint8 {
+ var sl []byte
+ if err := d.Decode(&sl); err != nil {
+ return err
+ }
+ if sl != nil {
+ rv.SetBytes(sl)
+ }
+ return nil
+ }
+
+ var sl []json.RawMessage
+ if err := d.Decode(&sl); err != nil {
+ return err
+ }
+ if sl != nil {
+ rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
+ }
+ for _, item := range sl {
+ bv := reflect.New(rv.Type().Elem())
+ if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
+ return err
+ }
+ rv.Set(reflect.Append(rv, bv.Elem()))
+ }
+ return nil
+ }
+ if _, ok := rv.Interface().(protoEnum); ok {
+ var repr interface{}
+ if err := d.Decode(&repr); err != nil {
+ return err
+ }
+ switch v := repr.(type) {
+ case string:
+ // TODO(yugui) Should use proto.StructProperties?
+ return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+ case float64:
+ rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
+ return nil
+ default:
+ return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+ }
+ }
+ return d.Decode(v)
+}
+
+type protoEnum interface {
+ fmt.Stringer
+ EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoEnum = reflect.TypeFor[protoEnum]()
+
+var typeProtoMessage = reflect.TypeFor[proto.Message]()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+ return []byte("\n")
+}
+
+var (
+ convFromType = map[reflect.Kind]reflect.Value{
+ reflect.String: reflect.ValueOf(String),
+ reflect.Bool: reflect.ValueOf(Bool),
+ reflect.Float64: reflect.ValueOf(Float64),
+ reflect.Float32: reflect.ValueOf(Float32),
+ reflect.Int64: reflect.ValueOf(Int64),
+ reflect.Int32: reflect.ValueOf(Int32),
+ reflect.Uint64: reflect.ValueOf(Uint64),
+ reflect.Uint32: reflect.ValueOf(Uint32),
+ reflect.Slice: reflect.ValueOf(Bytes),
+ }
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
new file mode 100644
index 00000000000..398c780dc22
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
@@ -0,0 +1,60 @@
+package runtime
+
+import (
+ "errors"
+ "io"
+
+ "google.golang.org/protobuf/proto"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes.
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType(_ interface{}) string {
+ return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return nil, errors.New("unable to marshal non proto field")
+ }
+ return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return errors.New("unable to unmarshal non proto field")
+ }
+ return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+ return DecoderFunc(func(value interface{}) error {
+ buffer, err := io.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+ return marshaller.Unmarshal(buffer, value)
+ })
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+ return EncoderFunc(func(value interface{}) error {
+ buffer, err := marshaller.Marshal(value)
+ if err != nil {
+ return err
+ }
+ if _, err := writer.Write(buffer); err != nil {
+ return err
+ }
+
+ return nil
+ })
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
new file mode 100644
index 00000000000..b1dfc37af9b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
@@ -0,0 +1,58 @@
+package runtime
+
+import (
+ "io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+ // Marshal marshals "v" into byte sequence.
+ Marshal(v interface{}) ([]byte, error)
+ // Unmarshal unmarshals "data" into "v".
+ // "v" must be a pointer value.
+ Unmarshal(data []byte, v interface{}) error
+ // NewDecoder returns a Decoder which reads byte sequence from "r".
+ NewDecoder(r io.Reader) Decoder
+ // NewEncoder returns an Encoder which writes a byte sequence into "w".
+ NewEncoder(w io.Writer) Encoder
+ // ContentType returns the Content-Type which this marshaler is responsible for.
+ // The parameter describes the type which is being marshalled, which can sometimes
+ // affect the content type returned.
+ ContentType(v interface{}) string
+}
+
+// Decoder decodes a byte sequence
+type Decoder interface {
+ Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+ Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into Encoder
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+ // Delimiter returns the record separator for the stream.
+ Delimiter() []byte
+}
+
+// StreamContentType defines the streaming content type.
+type StreamContentType interface {
+ // StreamContentType returns the content type for a stream. This shares the
+ // same behaviour as for `Marshaler.ContentType`, but is called, if present,
+ // in the case of a streamed response.
+ StreamContentType(v interface{}) string
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
new file mode 100644
index 00000000000..07c28112c89
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -0,0 +1,109 @@
+package runtime
+
+import (
+ "errors"
+ "mime"
+ "net/http"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+ acceptHeader = http.CanonicalHeaderKey("Accept")
+ contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+ defaultMarshaler = &HTTPBodyMarshaler{
+ Marshaler: &JSONPb{
+ MarshalOptions: protojson.MarshalOptions{
+ EmitUnpopulated: true,
+ },
+ UnmarshalOptions: protojson.UnmarshalOptions{
+ DiscardUnknown: true,
+ },
+ },
+ }
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
+// If it isn't set (or the request Content-Type is empty), it checks for "*".
+// If there are multiple Content-Type headers set, it chooses the first one that it can
+// exactly match in the registry.
+// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+ for _, acceptVal := range r.Header[acceptHeader] {
+ if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+ outbound = m
+ break
+ }
+ }
+
+ for _, contentTypeVal := range r.Header[contentTypeHeader] {
+ contentType, _, err := mime.ParseMediaType(contentTypeVal)
+ if err != nil {
+ grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+ continue
+ }
+ if m, ok := mux.marshalers.mimeMap[contentType]; ok {
+ inbound = m
+ break
+ }
+ }
+
+ if inbound == nil {
+ inbound = mux.marshalers.mimeMap[MIMEWildcard]
+ }
+ if outbound == nil {
+ outbound = inbound
+ }
+
+ return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+ mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+ if len(mime) == 0 {
+ return errors.New("empty MIME type")
+ }
+
+ m.mimeMap[mime] = marshaler
+
+ return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+ return marshalerRegistry{
+ mimeMap: map[string]Marshaler{
+ MIMEWildcard: defaultMarshaler,
+ },
+ }
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+ return func(mux *ServeMux) {
+ if err := mux.marshalers.add(mime, marshaler); err != nil {
+ panic(err)
+ }
+ }
+}
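+
+// A usage sketch (the MIME type choice is illustrative): serving binary
+// protobuf to clients that send Content-Type "application/octet-stream",
+// while keeping the JSON default for everything else.
+//
+//	mux := runtime.NewServeMux(
+//		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
+//	)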
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
new file mode 100644
index 00000000000..3eb16167173
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -0,0 +1,553 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/textproto"
+ "regexp"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/health/grpc_health_v1"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
+type UnescapingMode int
+
+const (
+ // UnescapingModeLegacy is the default V2 behavior, which unescapes the entire
+ // path string before doing any routing.
+ UnescapingModeLegacy UnescapingMode = iota
+
+ // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
+ // reserved characters.
+ UnescapingModeAllExceptReserved
+
+ // UnescapingModeAllExceptSlash unescapes URL path parameters except path
+ // separators, which will be left as "%2F".
+ UnescapingModeAllExceptSlash
+
+ // UnescapingModeAllCharacters unescapes all URL path parameters.
+ UnescapingModeAllCharacters
+
+ // UnescapingModeDefault is the default escaping type.
+ // TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
+ // reference implementation
+ UnescapingModeDefault = UnescapingModeLegacy
+)
+
+var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// A Middleware handler wraps another HandlerFunc to do some pre- and/or
+// post-processing of the request. This is used as an alternative to gRPC
+// interceptors when using the direct-to-implementation registration methods.
+// It is generally recommended to use gRPC client or server interceptors
+// instead where possible.
+type Middleware func(HandlerFunc) HandlerFunc
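+
+// A minimal middleware sketch (the logging is illustrative; imports assumed):
+// a Middleware receives the next HandlerFunc and returns a wrapped one, so
+// pre- and/or post-processing happens around the inner handler.
+//
+//	logging := runtime.Middleware(func(next runtime.HandlerFunc) runtime.HandlerFunc {
+//		return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+//			log.Printf("-> %s %s", r.Method, r.URL.Path)
+//			next(w, r, pathParams)
+//		}
+//	})
+//	mux := runtime.NewServeMux(runtime.WithMiddlewares(logging))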
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+ // handlers maps HTTP method to a list of handlers.
+ handlers map[string][]handler
+ middlewares []Middleware
+ forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
+ forwardResponseRewriter ForwardResponseRewriter
+ marshalers marshalerRegistry
+ incomingHeaderMatcher HeaderMatcherFunc
+ outgoingHeaderMatcher HeaderMatcherFunc
+ outgoingTrailerMatcher HeaderMatcherFunc
+ metadataAnnotators []func(context.Context, *http.Request) metadata.MD
+ errorHandler ErrorHandlerFunc
+ streamErrorHandler StreamErrorHandlerFunc
+ routingErrorHandler RoutingErrorHandlerFunc
+ disablePathLengthFallback bool
+ unescapingMode UnescapingMode
+ writeContentLength bool
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages
+// before they are forwarded in a unary, stream, or error response.
+type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error)
+
+// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic
+// that can rewrite the final response before it is forwarded.
+//
+// The response rewriter function is called during unary message forwarding, stream message
+// forwarding and when errors are being forwarded.
+//
+// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2`
+// incorrect, since this option involves making runtime changes to the response shape or type.
+func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption {
+ return func(sm *ServeMux) {
+ sm.forwardResponseRewriter = fwdResponseRewriter
+ }
+}
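+
+// A rewriter sketch (the envelope shape is illustrative): the function may
+// return any value the outbound marshaler can encode, not only the original
+// proto.Message.
+//
+//	opt := runtime.WithForwardResponseRewriter(
+//		func(ctx context.Context, resp proto.Message) (any, error) {
+//			// Wrap every response in a {"data": ...} envelope.
+//			return map[string]any{"data": resp}, nil
+//		},
+//	)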
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+ }
+}
+
+// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
+// for more information.
+func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.unescapingMode = mode
+ }
+}
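+
+// For example, to route on percent-escaped path parameters while leaving
+// RFC 6570 reserved characters escaped:
+//
+//	mux := runtime.NewServeMux(
+//		runtime.WithUnescapingMode(runtime.UnescapingModeAllExceptReserved),
+//	)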
+
+// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC
+// interceptors when using the direct-to-implementation registration methods, where gRPC
+// interceptors cannot be used. It's recommended to use gRPC interceptors instead if possible.
+func WithMiddlewares(middlewares ...Middleware) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.middlewares = append(serveMux.middlewares, middlewares...)
+ }
+}
+
+// SetQueryParameterParser sets the query parameter parser, used to populate a message from query parameters.
+// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
+// done with careful consideration.
+func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ currentQueryParser = queryParameterParser
+ }
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent
+// HTTP header keys (as specified by the IANA, e.g. Accept, Cookie, Host) to the gRPC metadata with the
+// grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the
+// isPermanentHTTPHeader function.
+// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
+// Other headers are not added to the gRPC metadata.
+func DefaultHeaderMatcher(key string) (string, bool) {
+ switch key = textproto.CanonicalMIMEHeaderKey(key); {
+ case isPermanentHTTPHeader(key):
+ return MetadataPrefix + key, true
+ case strings.HasPrefix(key, MetadataHeaderPrefix):
+ return key[len(MetadataHeaderPrefix):], true
+ }
+ return "", false
+}
+
+func defaultOutgoingHeaderMatcher(key string) (string, bool) {
+ return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+}
+
+func defaultOutgoingTrailerMatcher(key string) (string, bool) {
+ return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
+//
+// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
+// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ for _, header := range fn.matchedMalformedHeaders() {
+ grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
+ }
+
+ return func(mux *ServeMux) {
+ mux.incomingHeaderMatcher = fn
+ }
+}
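+
+// A matcher sketch (the header name is illustrative): forwarding one custom
+// header under its own metadata key while deferring to the default rules for
+// everything else.
+//
+//	matcher := func(key string) (string, bool) {
+//		if textproto.CanonicalMIMEHeaderKey(key) == "X-Request-Id" {
+//			return "x-request-id", true
+//		}
+//		return runtime.DefaultHeaderMatcher(key)
+//	}
+//	mux := runtime.NewServeMux(runtime.WithIncomingHeaderMatcher(matcher))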
+
+// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server.
+func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
+ if fn == nil {
+ return nil
+ }
+ headers := make([]string, 0)
+ for header := range malformedHTTPHeaders {
+ out, accept := fn(header)
+ if accept && isMalformedHTTPHeader(out) {
+ headers = append(headers, out)
+ }
+ }
+ return headers
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingHeaderMatcher = fn
+ }
+}
+
+// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response trailer metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingTrailerMatcher = fn
+ }
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
+// is reading a token from a cookie and adding it to the gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+ }
+}
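+
+// An annotator sketch (the cookie and metadata key names are illustrative):
+// lifting a session cookie into gRPC metadata so the backing service can
+// authenticate the call.
+//
+//	opt := runtime.WithMetadata(func(ctx context.Context, r *http.Request) metadata.MD {
+//		if c, err := r.Cookie("session"); err == nil {
+//			return metadata.Pairs("session-token", c.Value)
+//		}
+//		return nil
+//	})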
+
+// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to configure a custom error response.
+func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.errorHandler = fn
+ }
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.streamErrorHandler = fn
+ }
+}
+
+// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
+//
+// The handler is called for errors which can happen before a gRPC route is selected or executed,
+// with one of the following error codes: StatusMethodNotAllowed, StatusNotFound, StatusBadRequest.
+func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.routingErrorHandler = fn
+ }
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.disablePathLengthFallback = true
+ }
+}
+
+// WithWriteContentLength returns a ServeMuxOption to enable writing content length on non-streaming responses
+func WithWriteContentLength() ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.writeContentLength = true
+ }
+}
+
+// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
+// When called, the handler will forward the request to the upstream gRPC service health check (defined in the
+// gRPC Health Checking Protocol).
+//
+// See https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
+// to set up the protocol in the gRPC server.
+//
+// If you define a service as a query parameter, it will also be forwarded as the service in the HealthCheckRequest.
+func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
+ return func(s *ServeMux) {
+ // error can be ignored since pattern is definitely valid
+ _ = s.HandlePath(
+ http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
+ ) {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ annotatedContext, err := AnnotateContext(r.Context(), s, r, grpc_health_v1.Health_Check_FullMethodName, WithHTTPPathPattern(endpointPath))
+ if err != nil {
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ var md ServerMetadata
+ resp, err := healthCheckClient.Check(annotatedContext, &grpc_health_v1.HealthCheckRequest{
+ Service: r.URL.Query().Get("service"),
+ }, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
+ annotatedContext = NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+
+ if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
+ switch resp.GetStatus() {
+ case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
+ err = status.Error(codes.Unavailable, resp.String())
+ case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
+ err = status.Error(codes.NotFound, resp.String())
+ }
+
+ s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ _ = outboundMarshaler.NewEncoder(w).Encode(resp)
+ })
+ }
+}
+
+// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
+//
+// See WithHealthEndpointAt for the general implementation.
+func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
+ return WithHealthEndpointAt(healthCheckClient, "/healthz")
+}
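+
+// A wiring sketch (connection setup elided): pointing the endpoint at the
+// upstream server's health service.
+//
+//	// conn is an existing *grpc.ClientConn to the upstream gRPC server.
+//	mux := runtime.NewServeMux(
+//		runtime.WithHealthzEndpoint(grpc_health_v1.NewHealthClient(conn)),
+//	)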
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+ serveMux := &ServeMux{
+ handlers: make(map[string][]handler),
+ forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+ forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil },
+ marshalers: makeMarshalerMIMERegistry(),
+ errorHandler: DefaultHTTPErrorHandler,
+ streamErrorHandler: DefaultStreamErrorHandler,
+ routingErrorHandler: DefaultRoutingErrorHandler,
+ unescapingMode: UnescapingModeDefault,
+ }
+
+ for _, opt := range opts {
+ opt(serveMux)
+ }
+
+ if serveMux.incomingHeaderMatcher == nil {
+ serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+ }
+ if serveMux.outgoingHeaderMatcher == nil {
+ serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher
+ }
+ if serveMux.outgoingTrailerMatcher == nil {
+ serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher
+ }
+
+ return serveMux
+}
+
+// Handle associates "h" to the pair of HTTP method and path pattern.
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+ if len(s.middlewares) > 0 {
+ h = chainMiddlewares(s.middlewares)(h)
+ }
+ s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
+}
+
+// HandlePath allows users to configure custom path handlers.
+// See: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
+func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
+ compiler, err := httprule.Parse(pathPattern)
+ if err != nil {
+ return fmt.Errorf("parsing path pattern: %w", err)
+ }
+ tp := compiler.Compile()
+ pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
+ if err != nil {
+ return fmt.Errorf("creating new pattern: %w", err)
+ }
+ s.Handle(meth, pattern, h)
+ return nil
+}
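+
+// A usage sketch (the path and body are illustrative): registering a plain
+// HTTP handler alongside the generated gRPC routes.
+//
+//	err := mux.HandlePath("GET", "/v1/assets/{name}", func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+//		fmt.Fprintf(w, "asset: %s", pathParams["name"])
+//	})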
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ path := r.URL.Path
+ if !strings.HasPrefix(path, "/") {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
+ return
+ }
+
+ // TODO(v3): remove UnescapingModeLegacy
+ if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
+ path = r.URL.RawPath
+ }
+
+ if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+ if err := r.ParseForm(); err != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, err.Error())
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ return
+ }
+ r.Method = strings.ToUpper(override)
+ }
+
+ var pathComponents []string
+ // Since in UnescapingModeLegacy the URL will already have been fully unescaped, splitting on "%2F"
+ // in that mode would mean double unescaping. In UnescapingModeAllCharacters we still split on it, as the
+ // path is the RawPath (i.e. not yet unescaped). That does mean the default behavior of this function will
+ // change when UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved.
+ if s.unescapingMode == UnescapingModeAllCharacters {
+ pathComponents = encodedPathSplitter.Split(path[1:], -1)
+ } else {
+ pathComponents = strings.Split(path[1:], "/")
+ }
+
+ lastPathComponent := pathComponents[len(pathComponents)-1]
+
+ for _, h := range s.handlers[r.Method] {
+ // If the pattern has a verb, explicitly look for a suffix in the last
+ // component that matches a colon plus the verb. This allows us to
+ // handle some cases that otherwise can't be correctly handled by the
+ // former LastIndex case, such as when the verb literal itself contains
+ // a colon. This should work for all cases that have run through the
+ // parser because we know what verb we're looking for, however, there
+ // are still some cases that the parser itself cannot disambiguate. See
+ // the comment there if interested.
+
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+ if idx == 0 {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+ return
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+ if err != nil {
+ var mse MalformedSequenceError
+ if ok := errors.As(err, &mse); ok {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: mse,
+ })
+ }
+ continue
+ }
+ s.handleHandler(h, w, r, pathParams)
+ return
+ }
+
+ // if no handler was found for the request, look up other methods
+ // to handle POST -> GET fallback if the request is subject to path
+ // length fallback.
+ // Note we are not eagerly checking the request here as we want to return the
+ // right HTTP status code, and we need to process the fallback candidates in
+ // order to do that.
+ for m, handlers := range s.handlers {
+ if m == r.Method {
+ continue
+ }
+ for _, h := range handlers {
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+ if err != nil {
+ var mse MalformedSequenceError
+ if ok := errors.As(err, &mse); ok {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: mse,
+ })
+ }
+ continue
+ }
+
+ // X-HTTP-Method-Override is optional. Always allow fallback to POST.
+ // Also, only consider POST -> GET fallbacks, and avoid falling back to
+ // potentially dangerous operations like DELETE.
+ if s.isPathLengthFallback(r) && m == http.MethodGet {
+ if err := r.ParseForm(); err != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, err.Error())
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ return
+ }
+ s.handleHandler(h, w, r, pathParams)
+ return
+ }
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
+ return
+ }
+ }
+
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+ return s.forwardResponseOptions
+}
+
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+ return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+ pat Pattern
+ h HandlerFunc
+}
+
+func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+ h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams)
+}
+
+func chainMiddlewares(mws []Middleware) Middleware {
+ return func(next HandlerFunc) HandlerFunc {
+ for i := len(mws); i > 0; i-- {
+ next = mws[i-1](next)
+ }
+ return next
+ }
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
new file mode 100644
index 00000000000..e54507145b6
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -0,0 +1,381 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc/grpclog"
+)
+
+var (
+ // ErrNotMatch indicates that the given HTTP request path does not match the pattern.
+ ErrNotMatch = errors.New("not match to the path pattern")
+ // ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+ ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+// MalformedSequenceError indicates that a path component contained a malformed percent-escape sequence.
+type MalformedSequenceError string
+
+func (e MalformedSequenceError) Error() string {
+ return "malformed path escape " + strconv.Quote(string(e))
+}
+
+type op struct {
+ code utilities.OpCode
+ operand int
+}
+
+// Pattern is a template pattern of http request paths defined in
+// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
+type Pattern struct {
+ // ops is a list of operations
+ ops []op
+ // pool is a constant pool indexed by the operands or vars.
+ pool []string
+ // vars is a list of variables names to be bound by this pattern
+ vars []string
+ // stacksize is the max depth of the stack
+ stacksize int
+ // tailLen is the length of the fixed-size segments after a deep wildcard
+ tailLen int
+ // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
+ verb string
+}
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
+ if version != 1 {
+ grpclog.Errorf("unsupported version: %d", version)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ l := len(ops)
+ if l%2 != 0 {
+ grpclog.Errorf("odd number of ops codes: %d", l)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ var (
+ typedOps []op
+ stack, maxstack int
+ tailLen int
+ pushMSeen bool
+ vars []string
+ )
+ for i := 0; i < l; i += 2 {
+ op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush:
+ if pushMSeen {
+ tailLen++
+ }
+ stack++
+ case utilities.OpPushM:
+ if pushMSeen {
+ grpclog.Error("pushM appears twice")
+ return Pattern{}, ErrInvalidPattern
+ }
+ pushMSeen = true
+ stack++
+ case utilities.OpLitPush:
+ if op.operand < 0 || len(pool) <= op.operand {
+ grpclog.Errorf("negative literal index: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ if pushMSeen {
+ tailLen++
+ }
+ stack++
+ case utilities.OpConcatN:
+ if op.operand <= 0 {
+ grpclog.Errorf("negative concat size: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ stack -= op.operand
+ if stack < 0 {
+ grpclog.Error("stack underflow")
+ return Pattern{}, ErrInvalidPattern
+ }
+ stack++
+ case utilities.OpCapture:
+ if op.operand < 0 || len(pool) <= op.operand {
+ grpclog.Errorf("variable name index out of bound: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ v := pool[op.operand]
+ op.operand = len(vars)
+ vars = append(vars, v)
+ stack--
+ if stack < 0 {
+ grpclog.Error("stack underflow")
+ return Pattern{}, ErrInvalidPattern
+ }
+ default:
+ grpclog.Errorf("invalid opcode: %d", op.code)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ if maxstack < stack {
+ maxstack = stack
+ }
+ typedOps = append(typedOps, op)
+ }
+ return Pattern{
+ ops: typedOps,
+ pool: pool,
+ vars: vars,
+ stacksize: maxstack,
+ tailLen: tailLen,
+ verb: verb,
+ }, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
+func MustPattern(p Pattern, err error) Pattern {
+ if err != nil {
+ grpclog.Fatalf("Pattern initialization failed: %v", err)
+ }
+ return p
+}
+
+// MatchAndEscape examines components to determine if they match a Pattern.
+// MatchAndEscape will return an error if no Patterns matched or if a pattern
+// matched but contained malformed escape sequences. If successful, the function
+// returns a mapping from field paths to their captured values.
+func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
+ if p.verb != verb {
+ if p.verb != "" {
+ return nil, ErrNotMatch
+ }
+ if len(components) == 0 {
+ components = []string{":" + verb}
+ } else {
+ components = append([]string{}, components...)
+ components[len(components)-1] += ":" + verb
+ }
+ }
+
+ var pos int
+ stack := make([]string, 0, p.stacksize)
+ captured := make([]string, len(p.vars))
+ l := len(components)
+ for _, op := range p.ops {
+ var err error
+
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush, utilities.OpLitPush:
+ if pos >= l {
+ return nil, ErrNotMatch
+ }
+ c := components[pos]
+ if op.code == utilities.OpLitPush {
+ if lit := p.pool[op.operand]; c != lit {
+ return nil, ErrNotMatch
+ }
+ } else if op.code == utilities.OpPush {
+ if c, err = unescape(c, unescapingMode, false); err != nil {
+ return nil, err
+ }
+ }
+ stack = append(stack, c)
+ pos++
+ case utilities.OpPushM:
+ end := len(components)
+ if end < pos+p.tailLen {
+ return nil, ErrNotMatch
+ }
+ end -= p.tailLen
+ c := strings.Join(components[pos:end], "/")
+ if c, err = unescape(c, unescapingMode, true); err != nil {
+ return nil, err
+ }
+ stack = append(stack, c)
+ pos = end
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ captured[op.operand] = stack[n]
+ stack = stack[:n]
+ }
+ }
+ if pos < l {
+ return nil, ErrNotMatch
+ }
+ bindings := make(map[string]string)
+ for i, val := range captured {
+ bindings[p.vars[i]] = val
+ }
+ return bindings, nil
+}
+
+// Match examines components to determine if they match a Pattern.
+// It will never perform per-component unescaping (see: UnescapingModeLegacy).
+// Match will return an error if no Patterns matched. If successful,
+// the function returns a mapping from field paths to their captured values.
+//
+// Deprecated: Use MatchAndEscape.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+ return p.MatchAndEscape(components, verb, UnescapingModeDefault)
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+ var stack []string
+ for _, op := range p.ops {
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush:
+ stack = append(stack, "*")
+ case utilities.OpLitPush:
+ stack = append(stack, p.pool[op.operand])
+ case utilities.OpPushM:
+ stack = append(stack, "**")
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+ }
+ }
+ segs := strings.Join(stack, "/")
+ if p.verb != "" {
+ return fmt.Sprintf("/%s:%s", segs, p.verb)
+ }
+ return "/" + segs
+}
+
+/*
+ * The following code is adopted and modified from Go's standard library
+ * and carries the attached license.
+ *
+ * Copyright 2009 The Go Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file.
+ */
+
+// ishex returns whether or not the given byte is a valid hex character
+func ishex(c byte) bool {
+ switch {
+ case '0' <= c && c <= '9':
+ return true
+ case 'a' <= c && c <= 'f':
+ return true
+ case 'A' <= c && c <= 'F':
+ return true
+ }
+ return false
+}
+
+func isRFC6570Reserved(c byte) bool {
+ switch c {
+ case '!', '#', '$', '&', '\'', '(', ')', '*',
+ '+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+// unhex converts a hex character to its 4-bit value.
+func unhex(c byte) byte {
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0'
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10
+ }
+ return 0
+}
+
+// shouldUnescapeWithMode returns true if the character should be unescaped
+// under the given mode.
+func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
+ switch mode {
+ case UnescapingModeAllExceptReserved:
+ if isRFC6570Reserved(c) {
+ return false
+ }
+ case UnescapingModeAllExceptSlash:
+ if c == '/' {
+ return false
+ }
+ case UnescapingModeAllCharacters:
+ return true
+ }
+ return true
+}
+
+// unescape unescapes a path string using the provided mode
+func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
+ // TODO(v3): remove UnescapingModeLegacy
+ if mode == UnescapingModeLegacy {
+ return s, nil
+ }
+
+ if !multisegment {
+ mode = UnescapingModeAllCharacters
+ }
+
+ // Count %, check that they're well-formed.
+ n := 0
+ for i := 0; i < len(s); {
+ if s[i] == '%' {
+ n++
+ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+ s = s[i:]
+ if len(s) > 3 {
+ s = s[:3]
+ }
+
+ return "", MalformedSequenceError(s)
+ }
+ i += 3
+ } else {
+ i++
+ }
+ }
+
+ if n == 0 {
+ return s, nil
+ }
+
+ var t strings.Builder
+ t.Grow(len(s))
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '%':
+ c := unhex(s[i+1])<<4 | unhex(s[i+2])
+ if shouldUnescapeWithMode(c, mode) {
+ t.WriteByte(c)
+ i += 2
+ continue
+ }
+ fallthrough
+ default:
+ t.WriteByte(s[i])
+ }
+ }
+
+ return t.String(), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
new file mode 100644
index 00000000000..f710036b350
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+ "google.golang.org/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+ return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+ b, err := Bool(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+ f, err := Float64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
+func Float32P(val string) (*float32, error) {
+ f, err := Float32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+ i, err := Int64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+ i, err := Int32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int32(i), nil
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+ i, err := Uint64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint64(i), nil
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+ i, err := Uint32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint32(i), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
new file mode 100644
index 00000000000..8549dfb97af
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -0,0 +1,378 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/known/durationpb"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+ "google.golang.org/protobuf/types/known/structpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
+
+// QueryParameterParser defines the interface for all query parameter parsers.
+type QueryParameterParser interface {
+ Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
+}
+
+// PopulateQueryParameters parses query parameters
+// into "msg" using current query parser
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+ return currentQueryParser.Parse(msg, values, filter)
+}
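+
+// A usage sketch (the message type is hypothetical): decoding
+// ?name=x&page.size=10 into a request proto with no fields filtered out.
+//
+//	var req examplepb.ListRequest // hypothetical generated message
+//	err := runtime.PopulateQueryParameters(&req, r.URL.Query(), utilities.NewDoubleArray(nil))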
+
+// DefaultQueryParser is a QueryParameterParser which implements the default
+// query parameters parsing behavior.
+//
+// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
+type DefaultQueryParser struct{}
+
+// Parse populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+ for key, values := range values {
+ if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
+ key = match[1]
+ values = append([]string{match[2]}, values...)
+ }
+
+ msgValue := msg.ProtoReflect()
+ fieldPath := normalizeFieldPath(msgValue, strings.Split(key, "."))
+ if filter.HasCommonPrefix(fieldPath) {
+ continue
+ }
+ if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+ fieldPath := strings.Split(fieldPathString, ".")
+ return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
+}
+
+func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string {
+ newFieldPath := make([]string, 0, len(fieldPath))
+ for i, fieldName := range fieldPath {
+ fields := msgValue.Descriptor().Fields()
+ fieldDesc := fields.ByTextName(fieldName)
+ if fieldDesc == nil {
+ fieldDesc = fields.ByJSONName(fieldName)
+ }
+ if fieldDesc == nil {
+ // return initial field path values if no matching message field was found
+ return fieldPath
+ }
+
+ newFieldPath = append(newFieldPath, string(fieldDesc.Name()))
+
+ // If this is the last element, we're done
+ if i == len(fieldPath)-1 {
+ break
+ }
+
+ // Only singular message fields are allowed
+ if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated {
+ return fieldPath
+ }
+
+ // Get the nested message
+ msgValue = msgValue.Get(fieldDesc).Message()
+ }
+
+ return newFieldPath
+}
+
+func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
+ if len(fieldPath) < 1 {
+ return errors.New("no field path")
+ }
+ if len(values) < 1 {
+ return errors.New("no value provided")
+ }
+
+ var fieldDescriptor protoreflect.FieldDescriptor
+ for i, fieldName := range fieldPath {
+ fields := msgValue.Descriptor().Fields()
+
+ // Get field by name
+ fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
+ if fieldDescriptor == nil {
+ fieldDescriptor = fields.ByJSONName(fieldName)
+ if fieldDescriptor == nil {
+ // We're not returning an error here because this could just be
+ // an extra query parameter that isn't part of the request.
+ grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
+ return nil
+ }
+ }
+
+ // Check if oneof already set
+ if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() {
+ if f := msgValue.WhichOneof(of); f != nil {
+ if fieldDescriptor.Message() == nil || fieldDescriptor.FullName() != f.FullName() {
+ return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
+ }
+ }
+ }
+
+ // If this is the last element, we're done
+ if i == len(fieldPath)-1 {
+ break
+ }
+
+ // Only singular message fields are allowed
+ if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
+ return fmt.Errorf("invalid path: %q is not a message", fieldName)
+ }
+
+ // Get the nested message
+ msgValue = msgValue.Mutable(fieldDescriptor).Message()
+ }
+
+ switch {
+ case fieldDescriptor.IsList():
+ return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
+ case fieldDescriptor.IsMap():
+ return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
+ }
+
+ if len(values) > 1 {
+ return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
+ }
+
+ return populateField(fieldDescriptor, msgValue, values[0])
+}
+
+func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
+ v, err := parseField(fieldDescriptor, value)
+ if err != nil {
+ return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+
+ msgValue.Set(fieldDescriptor, v)
+ return nil
+}
+
+func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
+ for _, value := range values {
+ v, err := parseField(fieldDescriptor, value)
+ if err != nil {
+ return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+ list.Append(v)
+ }
+
+ return nil
+}
+
+func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
+ if len(values) != 2 {
+ return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName())
+ }
+
+ key, err := parseField(fieldDescriptor.MapKey(), values[0])
+ if err != nil {
+ return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+
+ value, err := parseField(fieldDescriptor.MapValue(), values[1])
+ if err != nil {
+ return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+
+ mp.Set(key.MapKey(), value)
+
+ return nil
+}
+
+func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
+ switch fieldDescriptor.Kind() {
+ case protoreflect.BoolKind:
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfBool(v), nil
+ case protoreflect.EnumKind:
+ enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
+ if err != nil {
+ if errors.Is(err, protoregistry.NotFound) {
+ return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
+ }
+ return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
+ }
+ // Look for enum by name
+ v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
+ if v == nil {
+ i, err := strconv.Atoi(value)
+ if err != nil {
+ return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+ }
+ // Look for enum by number
+ if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil {
+ return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+ }
+ }
+ return protoreflect.ValueOfEnum(v.Number()), nil
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfInt32(int32(v)), nil
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfInt64(v), nil
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ v, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfUint32(uint32(v)), nil
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfUint64(v), nil
+ case protoreflect.FloatKind:
+ v, err := strconv.ParseFloat(value, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfFloat32(float32(v)), nil
+ case protoreflect.DoubleKind:
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfFloat64(v), nil
+ case protoreflect.StringKind:
+ return protoreflect.ValueOfString(value), nil
+ case protoreflect.BytesKind:
+ v, err := Bytes(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfBytes(v), nil
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ return parseMessage(fieldDescriptor.Message(), value)
+ default:
+ panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
+ }
+}
+
+func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
+ var msg proto.Message
+ switch msgDescriptor.FullName() {
+ case "google.protobuf.Timestamp":
+ t, err := time.Parse(time.RFC3339Nano, value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ timestamp := timestamppb.New(t)
+ if ok := timestamp.IsValid(); !ok {
+ return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value)
+ }
+ msg = timestamp
+ case "google.protobuf.Duration":
+ d, err := time.ParseDuration(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = durationpb.New(d)
+ case "google.protobuf.DoubleValue":
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Double(v)
+ case "google.protobuf.FloatValue":
+ v, err := strconv.ParseFloat(value, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Float(float32(v))
+ case "google.protobuf.Int64Value":
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Int64(v)
+ case "google.protobuf.Int32Value":
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Int32(int32(v))
+ case "google.protobuf.UInt64Value":
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.UInt64(v)
+ case "google.protobuf.UInt32Value":
+ v, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.UInt32(uint32(v))
+ case "google.protobuf.BoolValue":
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Bool(v)
+ case "google.protobuf.StringValue":
+ msg = wrapperspb.String(value)
+ case "google.protobuf.BytesValue":
+ v, err := Bytes(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Bytes(v)
+ case "google.protobuf.FieldMask":
+ fm := &field_mask.FieldMask{}
+ fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
+ msg = fm
+ case "google.protobuf.Value":
+ var v structpb.Value
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
+ case "google.protobuf.Struct":
+ var v structpb.Struct
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
+ default:
+ return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
+ }
+
+ return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
new file mode 100644
index 00000000000..b8940946577
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "utilities",
+ srcs = [
+ "doc.go",
+ "pattern.go",
+ "readerfactory.go",
+ "string_array_flag.go",
+ "trie.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
+)
+
+go_test(
+ name = "utilities_test",
+ size = "small",
+ srcs = [
+ "string_array_flag_test.go",
+ "trie_test.go",
+ ],
+ deps = [":utilities"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":utilities",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
new file mode 100644
index 00000000000..cf79a4d5886
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
@@ -0,0 +1,2 @@
+// Package utilities provides members for internal use in grpc-gateway.
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
new file mode 100644
index 00000000000..38ca39cc538
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+ // OpNop does nothing
+ OpNop = OpCode(iota)
+ // OpPush pushes a component to the stack
+ OpPush
+ // OpLitPush pushes a component to the stack if it matches the literal
+ OpLitPush
+ // OpPushM concatenates the remaining components and pushes them to the stack
+ OpPushM
+ // OpConcatN pops N items from the stack, concatenates them, and pushes the result back to the stack
+ OpConcatN
+ // OpCapture pops an item and binds it to the variable
+ OpCapture
+ // OpEnd is the least positive invalid opcode.
+ OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
new file mode 100644
index 00000000000..01d26edae3c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
@@ -0,0 +1,19 @@
+package utilities
+
+import (
+ "bytes"
+ "io"
+)
+
+// IOReaderFactory takes an io.Reader and returns a function that creates a new
+// reader positioned at the start of the buffered stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+ b, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return func() io.Reader {
+ return bytes.NewReader(b)
+ }, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
new file mode 100644
index 00000000000..66aa5f2dcc5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
@@ -0,0 +1,33 @@
+package utilities
+
+import (
+ "flag"
+ "strings"
+)
+
+// flagInterface is a cut-down interface to `flag`.
+type flagInterface interface {
+ Var(value flag.Value, name string, usage string)
+}
+
+// StringArrayFlag defines a flag with the specified name and usage string.
+// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
+func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
+ value := &StringArrayFlags{}
+ f.Var(value, name, usage)
+ return value
+}
+
+// StringArrayFlags is a wrapper of `[]string` to provide an interface for `flag.Var`
+type StringArrayFlags []string
+
+// String returns a string representation of `StringArrayFlags`
+func (i *StringArrayFlags) String() string {
+ return strings.Join(*i, ",")
+}
+
+// Set appends a value to `StringArrayFlags`
+func (i *StringArrayFlags) Set(value string) error {
+ *i = append(*i, value)
+ return nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
new file mode 100644
index 00000000000..dd99b0ed256
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
@@ -0,0 +1,174 @@
+package utilities
+
+import (
+ "sort"
+)
+
+// DoubleArray is a double-array implementation of a trie over sequences of strings.
+type DoubleArray struct {
+ // Encoding keeps an encoding from string to int
+ Encoding map[string]int
+ // Base is the base array of Double Array
+ Base []int
+ // Check is the check array of Double Array
+ Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+ da := &DoubleArray{Encoding: make(map[string]int)}
+ if len(seqs) == 0 {
+ return da
+ }
+
+ encoded := registerTokens(da, seqs)
+ sort.Sort(byLex(encoded))
+
+ root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+ addSeqs(da, encoded, 0, root)
+
+ for i := len(da.Base); i > 0; i-- {
+ if da.Check[i-1] != 0 {
+ da.Base = da.Base[:i]
+ da.Check = da.Check[:i]
+ break
+ }
+ }
+ return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+ var result [][]int
+ for _, seq := range seqs {
+ encoded := make([]int, 0, len(seq))
+ for _, token := range seq {
+ if _, ok := da.Encoding[token]; !ok {
+ da.Encoding[token] = len(da.Encoding)
+ }
+ encoded = append(encoded, da.Encoding[token])
+ }
+ result = append(result, encoded)
+ }
+ for i := range result {
+ result[i] = append(result[i], len(da.Encoding))
+ }
+ return result
+}
+
+type node struct {
+ row, col int
+ left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+ return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+ var result []*node
+ lastVal := int(-1)
+ last := new(node)
+ for i := n.left; i < n.right; i++ {
+ // skip rows that repeat the previous value; seqs is sorted, so
+ // duplicates at this column are adjacent
+ if lastVal == seqs[i][n.col+1] {
+ continue
+ }
+ lastVal = seqs[i][n.col+1]
+ last.right = i
+ last = &node{
+ row: i,
+ col: n.col + 1,
+ left: i,
+ }
+ result = append(result, last)
+ }
+ last.right = n.right
+ return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+ ensureSize(da, pos)
+
+ children := n.children(seqs)
+ var i int
+ for i = 1; ; i++ {
+ ok := func() bool {
+ for _, child := range children {
+ code := child.value(seqs)
+ j := i + code
+ ensureSize(da, j)
+ if da.Check[j] != 0 {
+ return false
+ }
+ }
+ return true
+ }()
+ if ok {
+ break
+ }
+ }
+ da.Base[pos] = i
+ for _, child := range children {
+ code := child.value(seqs)
+ j := i + code
+ da.Check[j] = pos + 1
+ }
+ terminator := len(da.Encoding)
+ for _, child := range children {
+ code := child.value(seqs)
+ if code == terminator {
+ continue
+ }
+ j := i + code
+ addSeqs(da, seqs, j, *child)
+ }
+}
+
+func ensureSize(da *DoubleArray, i int) {
+ for i >= len(da.Base) {
+ da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+ da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+ }
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+ si := l[i]
+ sj := l[j]
+ var k int
+ for k = 0; k < len(si) && k < len(sj); k++ {
+ if si[k] < sj[k] {
+ return true
+ }
+ if si[k] > sj[k] {
+ return false
+ }
+ }
+ return k < len(sj)
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
+ if len(da.Base) == 0 {
+ return false
+ }
+
+ var i int
+ for _, t := range seq {
+ code, ok := da.Encoding[t]
+ if !ok {
+ break
+ }
+ j := da.Base[i] + code
+ if len(da.Check) <= j || da.Check[j] != i+1 {
+ break
+ }
+ i = j
+ }
+ j := da.Base[i] + len(da.Encoding)
+ if len(da.Check) <= j || da.Check[j] != i+1 {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/in-toto/attestation/LICENSE b/vendor/github.com/in-toto/attestation/LICENSE
new file mode 100644
index 00000000000..702a3365c06
--- /dev/null
+++ b/vendor/github.com/in-toto/attestation/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2021 in-toto Developers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go
new file mode 100644
index 00000000000..ae912f0d1e2
--- /dev/null
+++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go
@@ -0,0 +1,128 @@
+/*
+Wrapper APIs for in-toto attestation ResourceDescriptor protos.
+*/
+
+package v1
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+)
+
+var (
+ ErrIncorrectDigestLength = errors.New("digest has incorrect length")
+ ErrInvalidDigestEncoding = errors.New("digest is not valid hex-encoded string")
+ ErrRDRequiredField = errors.New("at least one of name, URI, or digest is required")
+)
+
+type HashAlgorithm string
+
+const (
+ AlgorithmMD5 HashAlgorithm = "md5"
+ AlgorithmSHA1 HashAlgorithm = "sha1"
+ AlgorithmSHA224 HashAlgorithm = "sha224"
+ AlgorithmSHA512_224 HashAlgorithm = "sha512_224"
+ AlgorithmSHA256 HashAlgorithm = "sha256"
+ AlgorithmSHA512_256 HashAlgorithm = "sha512_256"
+ AlgorithmSHA384 HashAlgorithm = "sha384"
+ AlgorithmSHA512 HashAlgorithm = "sha512"
+ AlgorithmSHA3_224 HashAlgorithm = "sha3_224"
+ AlgorithmSHA3_256 HashAlgorithm = "sha3_256"
+ AlgorithmSHA3_384 HashAlgorithm = "sha3_384"
+ AlgorithmSHA3_512 HashAlgorithm = "sha3_512"
+ AlgorithmGitBlob HashAlgorithm = "gitBlob"
+ AlgorithmGitCommit HashAlgorithm = "gitCommit"
+ AlgorithmGitTag HashAlgorithm = "gitTag"
+ AlgorithmGitTree HashAlgorithm = "gitTree"
+ AlgorithmDirHash HashAlgorithm = "dirHash"
+)
+
+// HashAlgorithms indexes the known algorithms in a dictionary
+// by their string value
+var HashAlgorithms = map[string]HashAlgorithm{
+ "md5": AlgorithmMD5,
+ "sha1": AlgorithmSHA1,
+ "sha224": AlgorithmSHA224,
+ "sha512_224": AlgorithmSHA512_224,
+ "sha256": AlgorithmSHA256,
+ "sha512_256": AlgorithmSHA512_256,
+ "sha384": AlgorithmSHA384,
+ "sha512": AlgorithmSHA512,
+ "sha3_224": AlgorithmSHA3_224,
+ "sha3_256": AlgorithmSHA3_256,
+ "sha3_384": AlgorithmSHA3_384,
+ "sha3_512": AlgorithmSHA3_512,
+ "gitBlob": AlgorithmGitBlob,
+ "gitCommit": AlgorithmGitCommit,
+ "gitTag": AlgorithmGitTag,
+ "gitTree": AlgorithmGitTree,
+ "dirHash": AlgorithmDirHash,
+}
+
+// HexLength returns the expected length of an algorithm's hash when hex-encoded
+func (algo HashAlgorithm) HexLength() int {
+ switch algo {
+ case AlgorithmMD5:
+ return 16
+ case AlgorithmSHA1, AlgorithmGitBlob, AlgorithmGitCommit, AlgorithmGitTag, AlgorithmGitTree:
+ return 20
+ case AlgorithmSHA224, AlgorithmSHA512_224, AlgorithmSHA3_224:
+ return 28
+ case AlgorithmSHA256, AlgorithmSHA512_256, AlgorithmSHA3_256, AlgorithmDirHash:
+ return 32
+ case AlgorithmSHA384, AlgorithmSHA3_384:
+ return 48
+ case AlgorithmSHA512, AlgorithmSHA3_512:
+ return 64
+ default:
+ return 0
+ }
+}
+
+// String returns the hash algorithm name as a string
+func (algo HashAlgorithm) String() string {
+ return string(algo)
+}
+
+// isSupportedFixedSizeAlgorithm reports whether a given fixed-size hash algorithm is supported by default and returns the
+// algorithm's digest size in bytes, if supported. We assume gitCommit and dirHash are aliases for sha1 and sha256, respectively.
+//
+// SHA digest sizes from https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+// MD5 digest size from https://www.rfc-editor.org/rfc/rfc1321.html#section-1
+func isSupportedFixedSizeAlgorithm(algString string) (bool, int) {
+ algo := HashAlgorithm(algString)
+ return algo.HexLength() > 0, algo.HexLength()
+}
+
+func (d *ResourceDescriptor) Validate() error {
+ // at least one of name, URI, or digest is required
+ if d.GetName() == "" && d.GetUri() == "" && len(d.GetDigest()) == 0 {
+ return ErrRDRequiredField
+ }
+
+ if len(d.GetDigest()) > 0 {
+ for alg, digest := range d.GetDigest() {
+
+ // Per https://github.com/in-toto/attestation/blob/main/spec/v1/digest_set.md
+ // check encoding and length for supported algorithms;
+ // use of custom, unsupported algorithms is allowed and does not generate validation errors.
+ supported, size := isSupportedFixedSizeAlgorithm(alg)
+ if supported {
+ // the in-toto spec expects a hex-encoded string in DigestSets for supported algorithms
+ hashBytes, err := hex.DecodeString(digest)
+
+ if err != nil {
+ return fmt.Errorf("%w (%s: %s)", ErrInvalidDigestEncoding, alg, digest)
+ }
+
+ // check the length of the digest
+ if len(hashBytes) != size {
+ return fmt.Errorf("%w: got %d bytes, want %d bytes (%s: %s)", ErrIncorrectDigestLength, len(hashBytes), size, alg, digest)
+ }
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go
new file mode 100644
index 00000000000..0dd94ea2634
--- /dev/null
+++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go
@@ -0,0 +1,195 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v4.24.4
+// source: in_toto_attestation/v1/resource_descriptor.proto
+
+package v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Proto representation of the in-toto v1 ResourceDescriptor.
+// https://github.com/in-toto/attestation/blob/main/spec/v1/resource_descriptor.md
+// Validation of all fields is left to the users of this proto.
+type ResourceDescriptor struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"`
+ Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"`
+ DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"`
+ MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+ // Per the Struct protobuf spec, this type corresponds to
+ // a JSON Object, which is truly a map under the hood.
+ // So, the Struct a) is still consistent with our specification for
+ // the `annotations` field, and b) has native support in some language
+ // bindings making their use easier in implementations.
+ // See: https://pkg.go.dev/google.golang.org/protobuf/types/known/structpb#Struct
+ Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ResourceDescriptor) Reset() {
+ *x = ResourceDescriptor{}
+ mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ResourceDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceDescriptor) ProtoMessage() {}
+
+func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead.
+func (*ResourceDescriptor) Descriptor() ([]byte, []int) {
+ return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceDescriptor) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ResourceDescriptor) GetUri() string {
+ if x != nil {
+ return x.Uri
+ }
+ return ""
+}
+
+func (x *ResourceDescriptor) GetDigest() map[string]string {
+ if x != nil {
+ return x.Digest
+ }
+ return nil
+}
+
+func (x *ResourceDescriptor) GetContent() []byte {
+ if x != nil {
+ return x.Content
+ }
+ return nil
+}
+
+func (x *ResourceDescriptor) GetDownloadLocation() string {
+ if x != nil {
+ return x.DownloadLocation
+ }
+ return ""
+}
+
+func (x *ResourceDescriptor) GetMediaType() string {
+ if x != nil {
+ return x.MediaType
+ }
+ return ""
+}
+
+func (x *ResourceDescriptor) GetAnnotations() *structpb.Struct {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
+var File_in_toto_attestation_v1_resource_descriptor_proto protoreflect.FileDescriptor
+
+const file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc = "" +
+ "\n" +
+ "0in_toto_attestation/v1/resource_descriptor.proto\x12\x16in_toto_attestation.v1\x1a\x1cgoogle/protobuf/struct.proto\"\xe6\x02\n" +
+ "\x12ResourceDescriptor\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x10\n" +
+ "\x03uri\x18\x02 \x01(\tR\x03uri\x12N\n" +
+ "\x06digest\x18\x03 \x03(\v26.in_toto_attestation.v1.ResourceDescriptor.DigestEntryR\x06digest\x12\x18\n" +
+ "\acontent\x18\x04 \x01(\fR\acontent\x12+\n" +
+ "\x11download_location\x18\x05 \x01(\tR\x10downloadLocation\x12\x1d\n" +
+ "\n" +
+ "media_type\x18\x06 \x01(\tR\tmediaType\x129\n" +
+ "\vannotations\x18\a \x01(\v2\x17.google.protobuf.StructR\vannotations\x1a9\n" +
+ "\vDigestEntry\x12\x10\n" +
+ "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01BG\n" +
+ "\x1fio.github.intoto.attestation.v1Z$github.com/in-toto/attestation/go/v1b\x06proto3"
+
+var (
+ file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce sync.Once
+ file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData []byte
+)
+
+func file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP() []byte {
+ file_in_toto_attestation_v1_resource_descriptor_proto_rawDescOnce.Do(func() {
+ file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc), len(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc)))
+ })
+ return file_in_toto_attestation_v1_resource_descriptor_proto_rawDescData
+}
+
+var file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []any{
+ (*ResourceDescriptor)(nil), // 0: in_toto_attestation.v1.ResourceDescriptor
+ nil, // 1: in_toto_attestation.v1.ResourceDescriptor.DigestEntry
+ (*structpb.Struct)(nil), // 2: google.protobuf.Struct
+}
+var file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = []int32{
+ 1, // 0: in_toto_attestation.v1.ResourceDescriptor.digest:type_name -> in_toto_attestation.v1.ResourceDescriptor.DigestEntry
+ 2, // 1: in_toto_attestation.v1.ResourceDescriptor.annotations:type_name -> google.protobuf.Struct
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_in_toto_attestation_v1_resource_descriptor_proto_init() }
+func file_in_toto_attestation_v1_resource_descriptor_proto_init() {
+ if File_in_toto_attestation_v1_resource_descriptor_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc), len(file_in_toto_attestation_v1_resource_descriptor_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_in_toto_attestation_v1_resource_descriptor_proto_goTypes,
+ DependencyIndexes: file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs,
+ MessageInfos: file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes,
+ }.Build()
+ File_in_toto_attestation_v1_resource_descriptor_proto = out.File
+ file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = nil
+ file_in_toto_attestation_v1_resource_descriptor_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.go b/vendor/github.com/in-toto/attestation/go/v1/statement.go
new file mode 100644
index 00000000000..f63d5f0d747
--- /dev/null
+++ b/vendor/github.com/in-toto/attestation/go/v1/statement.go
@@ -0,0 +1,50 @@
+/*
+Wrapper APIs for in-toto attestation Statement layer protos.
+*/
+
+package v1
+
+import "errors"
+
+const StatementTypeUri = "https://in-toto.io/Statement/v1"
+
+var (
+ ErrInvalidStatementType = errors.New("wrong statement type")
+ ErrSubjectRequired = errors.New("at least one subject required")
+ ErrDigestRequired = errors.New("at least one digest required")
+ ErrPredicateTypeRequired = errors.New("predicate type required")
+ ErrPredicateRequired = errors.New("predicate object required")
+)
+
+func (s *Statement) Validate() error {
+ if s.GetType() != StatementTypeUri {
+ return ErrInvalidStatementType
+ }
+
+ if len(s.GetSubject()) == 0 {
+ return ErrSubjectRequired
+ }
+
+ // check all resource descriptors in the subject
+ subject := s.GetSubject()
+ for _, rd := range subject {
+ if err := rd.Validate(); err != nil {
+ return err
+ }
+
+ // v1 statements require the digest to be set in the subject
+ if len(rd.GetDigest()) == 0 {
+ return ErrDigestRequired
+ }
+ }
+
+ if s.GetPredicateType() == "" {
+ return ErrPredicateTypeRequired
+ }
+
+ if s.GetPredicate() == nil {
+ return ErrPredicateRequired
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go
new file mode 100644
index 00000000000..bc76eaf2617
--- /dev/null
+++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v4.24.4
+// source: in_toto_attestation/v1/statement.proto
+
+package v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Proto representation of the in-toto v1 Statement.
+// https://github.com/in-toto/attestation/tree/main/spec/v1
+// Validation of all fields is left to the users of this proto.
+type Statement struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Expected to always be "https://in-toto.io/Statement/v1"
+ Type string `protobuf:"bytes,1,opt,name=type,json=_type,proto3" json:"type,omitempty"`
+ Subject []*ResourceDescriptor `protobuf:"bytes,2,rep,name=subject,proto3" json:"subject,omitempty"`
+ PredicateType string `protobuf:"bytes,3,opt,name=predicate_type,json=predicateType,proto3" json:"predicate_type,omitempty"`
+ Predicate *structpb.Struct `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Statement) Reset() {
+ *x = Statement{}
+ mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Statement) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Statement) ProtoMessage() {}
+
+func (x *Statement) ProtoReflect() protoreflect.Message {
+ mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Statement.ProtoReflect.Descriptor instead.
+func (*Statement) Descriptor() ([]byte, []int) {
+ return file_in_toto_attestation_v1_statement_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Statement) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *Statement) GetSubject() []*ResourceDescriptor {
+ if x != nil {
+ return x.Subject
+ }
+ return nil
+}
+
+func (x *Statement) GetPredicateType() string {
+ if x != nil {
+ return x.PredicateType
+ }
+ return ""
+}
+
+func (x *Statement) GetPredicate() *structpb.Struct {
+ if x != nil {
+ return x.Predicate
+ }
+ return nil
+}
+
+var File_in_toto_attestation_v1_statement_proto protoreflect.FileDescriptor
+
+const file_in_toto_attestation_v1_statement_proto_rawDesc = "" +
+ "\n" +
+ "&in_toto_attestation/v1/statement.proto\x12\x16in_toto_attestation.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a0in_toto_attestation/v1/resource_descriptor.proto\"\xc4\x01\n" +
+ "\tStatement\x12\x13\n" +
+ "\x04type\x18\x01 \x01(\tR\x05_type\x12D\n" +
+ "\asubject\x18\x02 \x03(\v2*.in_toto_attestation.v1.ResourceDescriptorR\asubject\x12%\n" +
+ "\x0epredicate_type\x18\x03 \x01(\tR\rpredicateType\x125\n" +
+ "\tpredicate\x18\x04 \x01(\v2\x17.google.protobuf.StructR\tpredicateBG\n" +
+ "\x1fio.github.intoto.attestation.v1Z$github.com/in-toto/attestation/go/v1b\x06proto3"
+
+var (
+ file_in_toto_attestation_v1_statement_proto_rawDescOnce sync.Once
+ file_in_toto_attestation_v1_statement_proto_rawDescData []byte
+)
+
+func file_in_toto_attestation_v1_statement_proto_rawDescGZIP() []byte {
+ file_in_toto_attestation_v1_statement_proto_rawDescOnce.Do(func() {
+ file_in_toto_attestation_v1_statement_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_statement_proto_rawDesc), len(file_in_toto_attestation_v1_statement_proto_rawDesc)))
+ })
+ return file_in_toto_attestation_v1_statement_proto_rawDescData
+}
+
+var file_in_toto_attestation_v1_statement_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_in_toto_attestation_v1_statement_proto_goTypes = []any{
+ (*Statement)(nil), // 0: in_toto_attestation.v1.Statement
+ (*ResourceDescriptor)(nil), // 1: in_toto_attestation.v1.ResourceDescriptor
+ (*structpb.Struct)(nil), // 2: google.protobuf.Struct
+}
+var file_in_toto_attestation_v1_statement_proto_depIdxs = []int32{
+ 1, // 0: in_toto_attestation.v1.Statement.subject:type_name -> in_toto_attestation.v1.ResourceDescriptor
+ 2, // 1: in_toto_attestation.v1.Statement.predicate:type_name -> google.protobuf.Struct
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_in_toto_attestation_v1_statement_proto_init() }
+func file_in_toto_attestation_v1_statement_proto_init() {
+ if File_in_toto_attestation_v1_statement_proto != nil {
+ return
+ }
+ file_in_toto_attestation_v1_resource_descriptor_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_in_toto_attestation_v1_statement_proto_rawDesc), len(file_in_toto_attestation_v1_statement_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_in_toto_attestation_v1_statement_proto_goTypes,
+ DependencyIndexes: file_in_toto_attestation_v1_statement_proto_depIdxs,
+ MessageInfos: file_in_toto_attestation_v1_statement_proto_msgTypes,
+ }.Build()
+ File_in_toto_attestation_v1_statement_proto = out.File
+ file_in_toto_attestation_v1_statement_proto_goTypes = nil
+ file_in_toto_attestation_v1_statement_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/LICENSE b/vendor/github.com/in-toto/in-toto-golang/LICENSE
new file mode 100644
index 00000000000..963ee949e8e
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2018 New York University
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go
new file mode 100644
index 00000000000..73aafe7e1c4
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go
@@ -0,0 +1,99 @@
+package in_toto
+
+import (
+ "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+ slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1"
+ slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+)
+
+const (
+ // StatementInTotoV01 is the statement type for the generalized link format
+ // containing statements. This is constant for all predicate types.
+ StatementInTotoV01 = "https://in-toto.io/Statement/v0.1"
+ // PredicateSPDX represents a SBOM using the SPDX standard.
+ // The SPDX standard mandates the 'spdxVersion' field, so the predicate
+ // type can omit the version.
+ PredicateSPDX = "https://spdx.dev/Document"
+ // PredicateCycloneDX represents a CycloneDX SBOM
+ PredicateCycloneDX = "https://cyclonedx.org/bom"
+ // PredicateLinkV1 represents an in-toto 0.9 link.
+ PredicateLinkV1 = "https://in-toto.io/Link/v1"
+)
+
+// Subject describes the set of software artifacts the statement applies to.
+type Subject struct {
+ Name string `json:"name"`
+ Digest common.DigestSet `json:"digest"`
+}
+
+// StatementHeader defines the common fields for all statements
+type StatementHeader struct {
+ Type string `json:"_type"`
+ PredicateType string `json:"predicateType"`
+ Subject []Subject `json:"subject"`
+}
+
+/*
+Statement binds the attestation to a particular subject and identifies the
+type of the predicate. This struct represents a generic statement.
+*/
+type Statement struct {
+ StatementHeader
+ // Predicate contains type-specific metadata.
+ Predicate interface{} `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate.
+type ProvenanceStatementSLSA01 struct {
+ StatementHeader
+ Predicate slsa01.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate.
+type ProvenanceStatementSLSA02 struct {
+ StatementHeader
+ Predicate slsa02.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA1 is the definition for an entire provenance statement with SLSA 1.0 predicate.
+type ProvenanceStatementSLSA1 struct {
+ StatementHeader
+ Predicate slsa1.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate.
+// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02).
+type ProvenanceStatement struct {
+ StatementHeader
+ Predicate slsa02.ProvenancePredicate `json:"predicate"`
+}
+
+// LinkStatement is the definition for an entire link statement.
+type LinkStatement struct {
+ StatementHeader
+ Predicate Link `json:"predicate"`
+}
+
+/*
+SPDXStatement is the definition for an entire SPDX statement.
+This is currently not implemented. Some tooling exists at
+https://github.com/spdx/tools-golang, but this software is still at an
+early stage.
+This struct is the same as the generic Statement struct but is added for
+completeness.
+*/
+type SPDXStatement struct {
+ StatementHeader
+ Predicate interface{} `json:"predicate"`
+}
+
+/*
+CycloneDXStatement defines a CycloneDX SBOM in the predicate. Like its SPDX
+counterpart, it is not currently serialized into a typed struct; the
+predicate is an empty interface, as in the generic Statement.
+*/
+type CycloneDXStatement struct {
+ StatementHeader
+ Predicate interface{} `json:"predicate"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go
new file mode 100644
index 00000000000..9b1de12b182
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go
@@ -0,0 +1,156 @@
+package in_toto
+
+import (
+ "crypto/x509"
+ "fmt"
+ "net/url"
+)
+
+const (
+ AllowAllConstraint = "*"
+)
+
+// CertificateConstraint defines the attributes a certificate must have to act as a functionary.
+// A wildcard `*` allows any value in the specified attribute, whereas an empty array or value
+// asserts that the certificate must have nothing for that attribute. A certificate must have
+// every value defined in a constraint to match.
+type CertificateConstraint struct {
+ CommonName string `json:"common_name"`
+ DNSNames []string `json:"dns_names"`
+ Emails []string `json:"emails"`
+ Organizations []string `json:"organizations"`
+ Roots []string `json:"roots"`
+ URIs []string `json:"uris"`
+}
+
+// checkResult is a data structure used to hold
+// certificate constraint errors
+type checkResult struct {
+ errors []error
+}
+
+// newCheckResult initializes a new checkResult
+func newCheckResult() *checkResult {
+ return &checkResult{
+ errors: make([]error, 0),
+ }
+}
+
+// evaluate runs a constraint check on a certificate
+func (cr *checkResult) evaluate(cert *x509.Certificate, constraintCheck func(*x509.Certificate) error) *checkResult {
+ err := constraintCheck(cert)
+ if err != nil {
+ cr.errors = append(cr.errors, err)
+ }
+ return cr
+}
+
+// error reduces all of the errors into one error with a
+// combined error message. If there are no errors, nil
+// will be returned.
+func (cr *checkResult) error() error {
+ if len(cr.errors) == 0 {
+ return nil
+ }
+ return fmt.Errorf("cert failed constraints check: %+q", cr.errors)
+}
+
+// Check tests the provided certificate against the constraint. An error is returned if the certificate
+// fails any of the constraints. nil is returned if the certificate passes all of the constraints.
+func (cc CertificateConstraint) Check(cert *x509.Certificate, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error {
+ return newCheckResult().
+ evaluate(cert, cc.checkCommonName).
+ evaluate(cert, cc.checkDNSNames).
+ evaluate(cert, cc.checkEmails).
+ evaluate(cert, cc.checkOrganizations).
+ evaluate(cert, cc.checkRoots(rootCAIDs, rootCertPool, intermediateCertPool)).
+ evaluate(cert, cc.checkURIs).
+ error()
+}
+
+// checkCommonName verifies that the certificate's common name matches the constraint.
+func (cc CertificateConstraint) checkCommonName(cert *x509.Certificate) error {
+ return checkCertConstraint("common name", []string{cc.CommonName}, []string{cert.Subject.CommonName})
+}
+
+// checkDNSNames verifies that the certificate's dns names matches the constraint.
+func (cc CertificateConstraint) checkDNSNames(cert *x509.Certificate) error {
+ return checkCertConstraint("dns name", cc.DNSNames, cert.DNSNames)
+}
+
+// checkEmails verifies that the certificate's email addresses match the constraint.
+func (cc CertificateConstraint) checkEmails(cert *x509.Certificate) error {
+ return checkCertConstraint("email", cc.Emails, cert.EmailAddresses)
+}
+
+// checkOrganizations verifies that the certificate's organizations match the constraint.
+func (cc CertificateConstraint) checkOrganizations(cert *x509.Certificate) error {
+ return checkCertConstraint("organization", cc.Organizations, cert.Subject.Organization)
+}
+
+// checkRoots verifies that the certificate's root matches the constraint.
+// The certificate's trust chain is also verified.
+func (cc CertificateConstraint) checkRoots(rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) func(*x509.Certificate) error {
+ return func(cert *x509.Certificate) error {
+ _, err := VerifyCertificateTrust(cert, rootCertPool, intermediateCertPool)
+ if err != nil {
+ return fmt.Errorf("failed to verify roots: %w", err)
+ }
+ return checkCertConstraint("root", cc.Roots, rootCAIDs)
+ }
+}
+
+// checkURIs verifies that the certificate's URIs match the constraint.
+func (cc CertificateConstraint) checkURIs(cert *x509.Certificate) error {
+ return checkCertConstraint("uri", cc.URIs, urisToStrings(cert.URIs))
+}
+
+// urisToStrings is a helper that converts a list of URL objects to the string that represents them
+func urisToStrings(uris []*url.URL) []string {
+ res := make([]string, 0, len(uris))
+ for _, uri := range uris {
+ res = append(res, uri.String())
+ }
+
+ return res
+}
+
+// checkCertConstraint tests that the provided test values match the allowed values of the constraint.
+// All allowed values must be met one-to-one to be considered a successful match.
+func checkCertConstraint(attributeName string, constraints, values []string) error {
+ // If the only constraint is to allow all, the check succeeds
+ if len(constraints) == 1 && constraints[0] == AllowAllConstraint {
+ return nil
+ }
+
+ if len(constraints) == 1 && constraints[0] == "" {
+ constraints = []string{}
+ }
+
+ if len(values) == 1 && values[0] == "" {
+ values = []string{}
+ }
+
+ // If no constraints are specified, but the certificate has values for the attribute, then the check fails
+ if len(constraints) == 0 && len(values) > 0 {
+ return fmt.Errorf("not expecting any %s(s), but cert has %d %s(s)", attributeName, len(values), attributeName)
+ }
+
+ unmet := NewSet(constraints...)
+ for _, v := range values {
+ // if the cert has a value we didn't expect, fail early
+ if !unmet.Has(v) {
+ return fmt.Errorf("cert has an unexpected %s %s given constraints %+q", attributeName, v, constraints)
+ }
+
+ // consider the constraint met
+ unmet.Remove(v)
+ }
+
+ // if we have any unmet left after going through each test value, fail.
+ if len(unmet) > 0 {
+ return fmt.Errorf("cert with %s(s) %+q did not pass all constraints %+q", attributeName, values, constraints)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go
new file mode 100644
index 00000000000..2c8afff1f75
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go
@@ -0,0 +1,166 @@
+package in_toto
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/secure-systems-lab/go-securesystemslib/cjson"
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+ "github.com/secure-systems-lab/go-securesystemslib/signerverifier"
+)
+
+// PayloadType is the payload type used for links and layouts.
+const PayloadType = "application/vnd.in-toto+json"
+
+// ErrInvalidPayloadType indicates that the envelope used an unknown payload type
+var ErrInvalidPayloadType = errors.New("unknown payload type")
+
+type Envelope struct {
+ envelope *dsse.Envelope
+ payload any
+}
+
+func loadEnvelope(env *dsse.Envelope) (*Envelope, error) {
+ e := &Envelope{envelope: env}
+
+ contentBytes, err := env.DecodeB64Payload()
+ if err != nil {
+ return nil, err
+ }
+
+ payload, err := loadPayload(contentBytes)
+ if err != nil {
+ return nil, err
+ }
+ e.payload = payload
+
+ return e, nil
+}
+
+func (e *Envelope) SetPayload(payload any) error {
+ encodedBytes, err := cjson.EncodeCanonical(payload)
+ if err != nil {
+ return err
+ }
+
+ e.payload = payload
+ e.envelope = &dsse.Envelope{
+ Payload: base64.StdEncoding.EncodeToString(encodedBytes),
+ PayloadType: PayloadType,
+ }
+
+ return nil
+}
+
+func (e *Envelope) GetPayload() any {
+ return e.payload
+}
+
+func (e *Envelope) VerifySignature(key Key) error {
+ verifier, err := getSignerVerifierFromKey(key)
+ if err != nil {
+ return err
+ }
+
+ ev, err := dsse.NewEnvelopeVerifier(verifier)
+ if err != nil {
+ return err
+ }
+
+ _, err = ev.Verify(context.Background(), e.envelope)
+ return err
+}
+
+func (e *Envelope) Sign(key Key) error {
+ signer, err := getSignerVerifierFromKey(key)
+ if err != nil {
+ return err
+ }
+
+ es, err := dsse.NewEnvelopeSigner(signer)
+ if err != nil {
+ return err
+ }
+
+ payload, err := e.envelope.DecodeB64Payload()
+ if err != nil {
+ return err
+ }
+
+ env, err := es.SignPayload(context.Background(), e.envelope.PayloadType, payload)
+ if err != nil {
+ return err
+ }
+
+ e.envelope = env
+ return nil
+}
+
+func (e *Envelope) Sigs() []Signature {
+ sigs := []Signature{}
+ for _, s := range e.envelope.Signatures {
+ sigs = append(sigs, Signature{
+ KeyID: s.KeyID,
+ Sig: s.Sig,
+ })
+ }
+ return sigs
+}
+
+func (e *Envelope) GetSignatureForKeyID(keyID string) (Signature, error) {
+ for _, s := range e.Sigs() {
+ if s.KeyID == keyID {
+ return s, nil
+ }
+ }
+
+ return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID)
+}
+
+func (e *Envelope) Dump(path string) error {
+ jsonBytes, err := json.MarshalIndent(e.envelope, "", " ")
+ if err != nil {
+ return err
+ }
+
+ // Write JSON bytes to the passed path with permissions (-rw-r--r--)
+ err = os.WriteFile(path, jsonBytes, 0644)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func getSignerVerifierFromKey(key Key) (dsse.SignerVerifier, error) {
+ sslibKey := getSSLibKeyFromKey(key)
+
+ switch sslibKey.KeyType {
+ case signerverifier.RSAKeyType:
+ return signerverifier.NewRSAPSSSignerVerifierFromSSLibKey(&sslibKey)
+ case signerverifier.ED25519KeyType:
+ return signerverifier.NewED25519SignerVerifierFromSSLibKey(&sslibKey)
+ case signerverifier.ECDSAKeyType:
+ return signerverifier.NewECDSASignerVerifierFromSSLibKey(&sslibKey)
+ }
+
+ return nil, ErrUnsupportedKeyType
+}
+
+func getSSLibKeyFromKey(key Key) signerverifier.SSLibKey {
+ return signerverifier.SSLibKey{
+ KeyType: key.KeyType,
+ KeyIDHashAlgorithms: key.KeyIDHashAlgorithms,
+ KeyID: key.KeyID,
+ Scheme: key.Scheme,
+ KeyVal: signerverifier.KeyVal{
+ Public: key.KeyVal.Public,
+ Private: key.KeyVal.Private,
+ Certificate: key.KeyVal.Certificate,
+ },
+ }
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go
new file mode 100644
index 00000000000..bdfc65d69f9
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go
@@ -0,0 +1,30 @@
+package in_toto
+
+import (
+ "crypto/sha256"
+ "crypto/sha512"
+ "hash"
+)
+
+/*
+getHashMapping returns a mapping from hash algorithm to supported hash
+interface.
+*/
+func getHashMapping() map[string]func() hash.Hash {
+ return map[string]func() hash.Hash{
+ "sha256": sha256.New,
+ "sha512": sha512.New,
+ "sha384": sha512.New384,
+ }
+}
+
+/*
+hashToHex calculates the hash over data based on hash algorithm h.
+*/
+func hashToHex(h hash.Hash, data []byte) []byte {
+ h.Write(data)
+ // We need to use h.Sum(nil) here, because otherwise hash.Sum() appends
+ // the hash to the passed data. So instead of having only the hash
+ // we would get: "dataHASH"
+ return h.Sum(nil)
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go
new file mode 100644
index 00000000000..52429ca44be
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go
@@ -0,0 +1,668 @@
+package in_toto
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/secure-systems-lab/go-securesystemslib/cjson"
+)
+
+// ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails
+var ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type")
+
+// ErrNoPEMBlock gets triggered when there is no PEM block in the provided file
+var ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)")
+
+// ErrUnsupportedKeyType is returned when we are dealing with a key type other than ed25519, ECDSA, or RSA
+var ErrUnsupportedKeyType = errors.New("unsupported key type")
+
+// ErrInvalidSignature is returned when the signature is invalid
+var ErrInvalidSignature = errors.New("invalid signature")
+
+// ErrInvalidKey is returned when a given key is none of RSA, ECDSA or ED25519
+var ErrInvalidKey = errors.New("invalid key")
+
+const (
+ rsaKeyType string = "rsa"
+ ecdsaKeyType string = "ecdsa"
+ ed25519KeyType string = "ed25519"
+ rsassapsssha256Scheme string = "rsassa-pss-sha256"
+ ecdsaSha2nistp224 string = "ecdsa-sha2-nistp224"
+ ecdsaSha2nistp256 string = "ecdsa-sha2-nistp256"
+ ecdsaSha2nistp384 string = "ecdsa-sha2-nistp384"
+ ecdsaSha2nistp521 string = "ecdsa-sha2-nistp521"
+ ed25519Scheme string = "ed25519"
+ pemPublicKey string = "PUBLIC KEY"
+ pemPrivateKey string = "PRIVATE KEY"
+ pemRSAPrivateKey string = "RSA PRIVATE KEY"
+)
+
+/*
+getSupportedKeyIDHashAlgorithms returns a Set of supported
+KeyIDHashAlgorithms. We need to use this function instead of a constant,
+because Go does not support global constant slices.
+*/
+func getSupportedKeyIDHashAlgorithms() Set {
+ return NewSet("sha256", "sha512")
+}
+
+/*
+getSupportedRSASchemes returns a string slice of supported RSA Key schemes.
+We need to use this function instead of a constant because Go does not support
+global constant slices.
+*/
+func getSupportedRSASchemes() []string {
+ return []string{rsassapsssha256Scheme}
+}
+
+/*
+getSupportedEcdsaSchemes returns a string slice of supported ecdsa Key schemes.
+We need to use this function instead of a constant because Go does not support
+global constant slices.
+*/
+func getSupportedEcdsaSchemes() []string {
+ return []string{ecdsaSha2nistp224, ecdsaSha2nistp256, ecdsaSha2nistp384, ecdsaSha2nistp521}
+}
+
+/*
+getSupportedEd25519Schemes returns a string slice of supported ed25519 Key
+schemes. We need to use this function instead of a constant because Go does
+not support global constant slices.
+*/
+func getSupportedEd25519Schemes() []string {
+ return []string{ed25519Scheme}
+}
+
+/*
+generateKeyID creates a partial key map and generates the key ID as the
+SHA256 hash over the canonical JSON encoding of that partial key map.
+The resulting keyID is stored directly in the corresponding key object.
+On success generateKeyID returns nil; if encoding or validation fails,
+it returns the corresponding error.
+*/
+func (k *Key) generateKeyID() error {
+ // Create the partial key map used to create the keyID.
+ // Unfortunately, we can't use the Key object directly, because it carries
+ // fields we must exclude here, such as KeyID and KeyVal.Private, which
+ // would produce a different hash. We generate the keyID exactly as the
+ // securesystemslib does, to keep interoperability with other in-toto
+ // implementations.
+ var keyToBeHashed = map[string]interface{}{
+ "keytype": k.KeyType,
+ "scheme": k.Scheme,
+ "keyid_hash_algorithms": k.KeyIDHashAlgorithms,
+ "keyval": map[string]string{
+ "public": k.KeyVal.Public,
+ },
+ }
+ keyCanonical, err := cjson.EncodeCanonical(keyToBeHashed)
+ if err != nil {
+ return err
+ }
+ // calculate the sha256 hash and store its hex representation as the keyID
+ keyHashed := sha256.Sum256(keyCanonical)
+ k.KeyID = fmt.Sprintf("%x", keyHashed)
+ err = validateKey(*k)
+ if err != nil {
+ return err
+ }
+ return nil
+}
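+
+// For illustration (hypothetical sketch, not part of the upstream file), the
+// keyID computed above is simply the hex-encoded SHA256 of the canonical JSON
+// of the public key portion:
+//
+//	canonical, _ := cjson.EncodeCanonical(keyToBeHashed)
+//	keyID := fmt.Sprintf("%x", sha256.Sum256(canonical))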
+
+/*
+generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType.
+It returns the PEM block as a []byte slice. This function should always
+succeed; if keyBytes is empty, the PEM block will have an empty byte block,
+so only the header and footer will exist.
+*/
+func generatePEMBlock(keyBytes []byte, pemType string) []byte {
+ // construct PEM block
+ pemBlock := &pem.Block{
+ Type: pemType,
+ Headers: nil,
+ Bytes: keyBytes,
+ }
+ return pem.EncodeToMemory(pemBlock)
+}
+
+/*
+setKeyComponents sets all components in our key object.
+Furthermore it removes any leading and trailing whitespace or newlines.
+We treat key types differently for interoperability with the in-toto Python
+implementation and the securesystemslib.
+*/
+func (k *Key) setKeyComponents(pubKeyBytes []byte, privateKeyBytes []byte, keyType string, scheme string, KeyIDHashAlgorithms []string) error {
+ // assume we have a private key if privateKeyBytes is non-empty
+
+ switch keyType {
+ case rsaKeyType:
+ if len(privateKeyBytes) > 0 {
+ k.KeyVal = KeyVal{
+ Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemRSAPrivateKey))),
+ Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
+ }
+ } else {
+ k.KeyVal = KeyVal{
+ Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
+ }
+ }
+ case ecdsaKeyType:
+ if len(privateKeyBytes) > 0 {
+ k.KeyVal = KeyVal{
+ Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemPrivateKey))),
+ Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
+ }
+ } else {
+ k.KeyVal = KeyVal{
+ Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
+ }
+ }
+ case ed25519KeyType:
+ if len(privateKeyBytes) > 0 {
+ k.KeyVal = KeyVal{
+ Private: strings.TrimSpace(hex.EncodeToString(privateKeyBytes)),
+ Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)),
+ }
+ } else {
+ k.KeyVal = KeyVal{
+ Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)),
+ }
+ }
+ default:
+ return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, keyType)
+ }
+ k.KeyType = keyType
+ k.Scheme = scheme
+ k.KeyIDHashAlgorithms = KeyIDHashAlgorithms
+ if err := k.generateKeyID(); err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+parseKey tries to parse a PEM []byte slice, using the following formats
+in the given order:
+
+ - PKCS8 (private keys)
+ - PKCS1 (RSA private keys)
+ - PKIX (public keys)
+ - X.509 certificates
+ - SEC1 (EC private keys)
+
+On success it returns the parsed key and nil.
+On failure it returns nil and the error ErrFailedPEMParsing.
+*/
+func parseKey(data []byte) (interface{}, error) {
+ key, err := x509.ParsePKCS8PrivateKey(data)
+ if err == nil {
+ return key, nil
+ }
+ key, err = x509.ParsePKCS1PrivateKey(data)
+ if err == nil {
+ return key, nil
+ }
+ key, err = x509.ParsePKIXPublicKey(data)
+ if err == nil {
+ return key, nil
+ }
+ key, err = x509.ParseCertificate(data)
+ if err == nil {
+ return key, nil
+ }
+ key, err = x509.ParseECPrivateKey(data)
+ if err == nil {
+ return key, nil
+ }
+ return nil, ErrFailedPEMParsing
+}
+
+/*
+decodeAndParse receives potential PEM bytes, decodes them via pem.Decode,
+and passes them to parseKey. If any error occurs during this process,
+the function will return nil and an error (either ErrFailedPEMParsing
+or ErrNoPEMBlock). On success it will return the decoded pemData, the
+key object interface and nil as error. We need the decoded pemData,
+because LoadKey relies on decoded pemData for operating system
+interoperability.
+*/
+func decodeAndParse(pemBytes []byte) (*pem.Block, interface{}, error) {
+ // pem.Decode returns the parsed PEM block and a rest.
+ // The rest is everything that could not be parsed as a PEM block,
+ // so we can drop it via the blank identifier "_".
+ data, _ := pem.Decode(pemBytes)
+ if data == nil {
+ return nil, nil, ErrNoPEMBlock
+ }
+
+ // Try to load private key, if this fails try to load
+ // key as public key
+ key, err := parseKey(data.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ return data, key, nil
+}
+
+/*
+LoadKey loads the key file at specified file path into the key object.
+It automatically derives the PEM type and the key type.
+Right now the following PEM types are supported:
+
+ - PKCS1 for private keys
+ - PKCS8 for private keys
+ - PKIX for public keys
+
+The following key types are supported and will be automatically assigned to
+the key type field:
+
+ - ed25519
+ - rsa
+ - ecdsa
+
+The following schemes are supported:
+
+ - ed25519 -> ed25519
+ - rsa -> rsassa-pss-sha256
+ - ecdsa -> ecdsa-sha2-nistp256
+
+Note that this behavior is consistent with the securesystemslib, except for
+ecdsa. We do not use the scheme string as key type in in-toto-golang.
+Instead we use an ecdsa/ecdsa-sha2-nistp256 pair.
+
+On success it will return nil. The following errors can happen:
+
+ - path not found or not readable
+ - no PEM block in the loaded file
+ - no valid PKCS8/PKCS1 private key or PKIX public key
+ - errors while marshalling
+ - unsupported key types
+*/
+func (k *Key) LoadKey(path string, scheme string, KeyIDHashAlgorithms []string) error {
+ pemFile, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer pemFile.Close()
+
+ err = k.LoadKeyReader(pemFile, scheme, KeyIDHashAlgorithms)
+ if err != nil {
+ return err
+ }
+
+ return pemFile.Close()
+}
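+
+// A hypothetical usage sketch (not part of the upstream file): loading an RSA
+// public key with an explicit scheme ("alice.pub" is a placeholder path):
+//
+//	var key Key
+//	if err := key.LoadKey("alice.pub", "rsassa-pss-sha256", []string{"sha256", "sha512"}); err != nil {
+//		log.Fatal(err)
+//	}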
+
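+// LoadKeyDefaults loads the key file at the specified path like LoadKey, but
+// derives a default scheme and default KeyIDHashAlgorithms from the key
+// itself via getDefaultKeyScheme.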
+func (k *Key) LoadKeyDefaults(path string) error {
+ pemFile, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer pemFile.Close()
+
+ err = k.LoadKeyReaderDefaults(pemFile)
+ if err != nil {
+ return err
+ }
+
+ return pemFile.Close()
+}
+
+// LoadKeyReader loads the key from a supplied reader. The logic matches LoadKey otherwise.
+func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []string) error {
+ if r == nil {
+ return ErrNoPEMBlock
+ }
+ // Read key bytes
+ pemBytes, err := io.ReadAll(r)
+ if err != nil {
+ return err
+ }
+ // decodeAndParse returns the pemData for later use
+ // and a parsed key object (for operations on that key, like extracting the public Key)
+ pemData, key, err := decodeAndParse(pemBytes)
+ if err != nil {
+ return err
+ }
+
+ return k.loadKey(key, pemData, scheme, KeyIDHashAlgorithms)
+}
+
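+// LoadKeyReaderDefaults loads the key from a supplied reader like
+// LoadKeyReader, but derives a default scheme and default KeyIDHashAlgorithms
+// from the key itself via getDefaultKeyScheme.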
+func (k *Key) LoadKeyReaderDefaults(r io.Reader) error {
+ if r == nil {
+ return ErrNoPEMBlock
+ }
+ // Read key bytes
+ pemBytes, err := io.ReadAll(r)
+ if err != nil {
+ return err
+ }
+ // decodeAndParse returns the pemData for later use
+ // and a parsed key object (for operations on that key, like extracting the public Key)
+ pemData, key, err := decodeAndParse(pemBytes)
+ if err != nil {
+ return err
+ }
+
+ scheme, keyIDHashAlgorithms, err := getDefaultKeyScheme(key)
+ if err != nil {
+ return err
+ }
+
+ return k.loadKey(key, pemData, scheme, keyIDHashAlgorithms)
+}
+
+func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) {
+ keyIDHashAlgorithms = []string{"sha256", "sha512"}
+
+ switch k := key.(type) {
+ case *rsa.PublicKey, *rsa.PrivateKey:
+ scheme = rsassapsssha256Scheme
+ case ed25519.PrivateKey, ed25519.PublicKey:
+ scheme = ed25519Scheme
+ case *ecdsa.PrivateKey, *ecdsa.PublicKey:
+ scheme = ecdsaSha2nistp256
+ case *x509.Certificate:
+ return getDefaultKeyScheme(k.PublicKey)
+ default:
+ err = ErrUnsupportedKeyType
+ }
+
+ return scheme, keyIDHashAlgorithms, err
+}
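+
+// For reference, the defaults chosen above are: rsa -> rsassa-pss-sha256,
+// ed25519 -> ed25519, and ecdsa -> ecdsa-sha2-nistp256; certificates recurse
+// into their embedded public key.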
+
+func (k *Key) loadKey(keyObj interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error {
+ switch key := keyObj.(type) {
+ case *rsa.PublicKey:
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(key)
+ if err != nil {
+ return err
+ }
+ if err := k.setKeyComponents(pubKeyBytes, []byte{}, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
+ return err
+ }
+ case *rsa.PrivateKey:
+ // Note: RSA public keys get stored as X.509 SubjectPublicKeyInfo (RFC 5280).
+ // This behavior is consistent with the securesystemslib.
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public())
+ if err != nil {
+ return err
+ }
+ if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
+ return err
+ }
+ case ed25519.PublicKey:
+ if err := k.setKeyComponents(key, []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
+ return err
+ }
+ case ed25519.PrivateKey:
+ pubKeyBytes := key.Public()
+ if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
+ return err
+ }
+ case *ecdsa.PrivateKey:
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public())
+ if err != nil {
+ return err
+ }
+ if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
+ return err
+ }
+ case *ecdsa.PublicKey:
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(key)
+ if err != nil {
+ return err
+ }
+ if err := k.setKeyComponents(pubKeyBytes, []byte{}, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
+ return err
+ }
+ case *x509.Certificate:
+ err := k.loadKey(key.PublicKey, pemData, scheme, keyIDHashAlgorithms)
+ if err != nil {
+ return err
+ }
+
+ k.KeyVal.Certificate = string(pem.EncodeToMemory(pemData))
+
+ default:
+ // We should never get here, because all supported key types are handled above.
+ return errors.New("unexpected Error in LoadKey function")
+ }
+
+ return nil
+}
+
+/*
+GenerateSignature will automatically detect the key type and sign the signable data
+with the provided key. If everything goes right GenerateSignature will return
+a signature that is valid for the key and err=nil. If something goes wrong it will
+return an uninitialized signature and an error. Possible errors are:
+
+ - ErrNoPEMBlock
+ - ErrUnsupportedKeyType
+
+Currently only one scheme per key is supported.
+
+Note that in-toto-golang has different requirements for an ecdsa key.
+In in-toto-golang we use the string 'ecdsa' for the key type.
+In the key scheme we use: ecdsa-sha2-nistp256.
+*/
+func GenerateSignature(signable []byte, key Key) (Signature, error) {
+ err := validateKey(key)
+ if err != nil {
+ return Signature{}, err
+ }
+ var signature Signature
+ var signatureBuffer []byte
+ hashMapping := getHashMapping()
+ // The following switch block is needed for keeping interoperability
+ // with the securesystemslib and the python implementation
+ // in which we are storing RSA keys in PEM format, but ed25519 keys hex encoded.
+ switch key.KeyType {
+ case rsaKeyType:
+ // We do not need the pemData here, so we can throw it away via '_'
+ _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
+ if err != nil {
+ return Signature{}, err
+ }
+ parsedKey, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return Signature{}, ErrKeyKeyTypeMismatch
+ }
+ switch key.Scheme {
+ case rsassapsssha256Scheme:
+ hashed := hashToHex(hashMapping["sha256"](), signable)
+ // We use rand.Reader as secure random source for rsa.SignPSS()
+ signatureBuffer, err = rsa.SignPSS(rand.Reader, parsedKey.(*rsa.PrivateKey), crypto.SHA256, hashed,
+ &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
+ if err != nil {
+ return signature, err
+ }
+ default:
+ // supported key schemes will get checked in validateKey
+ panic("unexpected Error in GenerateSignature function")
+ }
+ case ecdsaKeyType:
+ // We do not need the pemData here, so we can throw it away via '_'
+ _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
+ if err != nil {
+ return Signature{}, err
+ }
+ parsedKey, ok := parsedKey.(*ecdsa.PrivateKey)
+ if !ok {
+ return Signature{}, ErrKeyKeyTypeMismatch
+ }
+ curveSize := parsedKey.(*ecdsa.PrivateKey).Curve.Params().BitSize
+ var hashed []byte
+ if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
+ return Signature{}, ErrCurveSizeSchemeMismatch
+ }
+ // implement https://tools.ietf.org/html/rfc5656#section-6.2.1
+ // We determine the curve size and choose the correct hashing
+ // method based on the curveSize
+ switch {
+ case curveSize <= 256:
+ hashed = hashToHex(hashMapping["sha256"](), signable)
+ case 256 < curveSize && curveSize <= 384:
+ hashed = hashToHex(hashMapping["sha384"](), signable)
+ case curveSize > 384:
+ hashed = hashToHex(hashMapping["sha512"](), signable)
+ default:
+ panic("unexpected Error in GenerateSignature function")
+ }
+ // Generate the ecdsa signature in the same way as the securesystemslib does:
+ // the (r, s) components are marshalled as ASN.1 INTEGERs into an ASN.1
+ // SEQUENCE via ecdsa.SignASN1.
+ signatureBuffer, err = ecdsa.SignASN1(rand.Reader, parsedKey.(*ecdsa.PrivateKey), hashed[:])
+ if err != nil {
+ return signature, err
+ }
+ case ed25519KeyType:
+ // We do not need a scheme switch here, because ed25519
+ // supports only a single signing scheme.
+ privateHex, err := hex.DecodeString(key.KeyVal.Private)
+ if err != nil {
+ return signature, ErrInvalidHexString
+ }
+ // Note: We can directly use the key for signing and do not
+ // need to use ed25519.NewKeyFromSeed().
+ signatureBuffer = ed25519.Sign(privateHex, signable)
+ default:
+ // We should never get here, because we call validateKey in the first
+ // line of the function.
+ panic("unexpected Error in GenerateSignature function")
+ }
+ signature.Sig = hex.EncodeToString(signatureBuffer)
+ signature.KeyID = key.KeyID
+ signature.Certificate = key.KeyVal.Certificate
+ return signature, nil
+}
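+
+// A hypothetical usage sketch (not part of the upstream file): signing
+// canonicalized metadata bytes with a previously loaded private key:
+//
+//	sig, err := GenerateSignature(dataCanonical, key)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(sig.KeyID, sig.Sig) // sig.Sig holds the hex-encoded signature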
+
+/*
+VerifySignature will verify unverified byte data via a passed key and signature.
+Supported key types are:
+
+ - rsa
+ - ed25519
+ - ecdsa
+
+When encountering an RSA key, VerifySignature will decode the PEM block in the key
+and will call rsa.VerifyPSS() for verifying the RSA signature.
+When encountering an ed25519 key, VerifySignature will decode the hex string encoded
+public key and will use ed25519.Verify() for verifying the ed25519 signature.
+When the given key is an ecdsa key, VerifySignature will unmarshal the ASN.1 object
+and will use the retrieved ecdsa components 'r' and 's' for verifying the signature.
+On success it will return nil. In case of an unsupported key type or any other error
+it will return an error.
+
+Note that in-toto-golang has different requirements for an ecdsa key.
+In in-toto-golang we use the string 'ecdsa' for the key type.
+In the key scheme we use: ecdsa-sha2-nistp256.
+*/
+func VerifySignature(key Key, sig Signature, unverified []byte) error {
+ err := validateKey(key)
+ if err != nil {
+ return err
+ }
+ sigBytes, err := hex.DecodeString(sig.Sig)
+ if err != nil {
+ return err
+ }
+ hashMapping := getHashMapping()
+ switch key.KeyType {
+ case rsaKeyType:
+ // We do not need the pemData here, so we can throw it away via '_'
+ _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
+ if err != nil {
+ return err
+ }
+ parsedKey, ok := parsedKey.(*rsa.PublicKey)
+ if !ok {
+ return ErrKeyKeyTypeMismatch
+ }
+ switch key.Scheme {
+ case rsassapsssha256Scheme:
+ hashed := hashToHex(hashMapping["sha256"](), unverified)
+ err = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
+ if err != nil {
+ return fmt.Errorf("%w: %s", ErrInvalidSignature, err)
+ }
+ default:
+ // supported key schemes will get checked in validateKey
+ panic("unexpected Error in VerifySignature function")
+ }
+ case ecdsaKeyType:
+ // We do not need the pemData here, so we can throw it away via '_'
+ _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
+ if err != nil {
+ return err
+ }
+ parsedKey, ok := parsedKey.(*ecdsa.PublicKey)
+ if !ok {
+ return ErrKeyKeyTypeMismatch
+ }
+ curveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize
+ var hashed []byte
+ if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
+ return ErrCurveSizeSchemeMismatch
+ }
+ // implement https://tools.ietf.org/html/rfc5656#section-6.2.1
+ // We determine the curve size and choose the correct hashing
+ // method based on the curveSize
+ switch {
+ case curveSize <= 256:
+ hashed = hashToHex(hashMapping["sha256"](), unverified)
+ case 256 < curveSize && curveSize <= 384:
+ hashed = hashToHex(hashMapping["sha384"](), unverified)
+ case curveSize > 384:
+ hashed = hashToHex(hashMapping["sha512"](), unverified)
+ default:
+ panic("unexpected Error in VerifySignature function")
+ }
+ if ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok {
+ return ErrInvalidSignature
+ }
+ case ed25519KeyType:
+ // We do not need a scheme switch here, because ed25519
+ // supports only a single signing scheme.
+ pubHex, err := hex.DecodeString(key.KeyVal.Public)
+ if err != nil {
+ return ErrInvalidHexString
+ }
+ if ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok {
+ return fmt.Errorf("%w: ed25519", ErrInvalidSignature)
+ }
+ default:
+ // We should never get here, because we call validateKey in the first
+ // line of the function.
+ panic("unexpected Error in VerifySignature function")
+ }
+ return nil
+}
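+
+// A hypothetical round-trip sketch (not part of the upstream file); privKey
+// and pubKey are assumed to be the corresponding loaded in-toto keys:
+//
+//	sig, err := GenerateSignature(data, privKey)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := VerifySignature(pubKey, sig, data); err != nil {
+//		log.Fatal(err) // returns/wraps ErrInvalidSignature on a bad signature
+//	}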
+
+/*
+VerifyCertificateTrust verifies that the certificate has a chain of trust
+to a root in rootCertPool, possibly using any intermediates in
+intermediateCertPool
+*/
+func VerifyCertificateTrust(cert *x509.Certificate, rootCertPool, intermediateCertPool *x509.CertPool) ([][]*x509.Certificate, error) {
+ verifyOptions := x509.VerifyOptions{
+ Roots: rootCertPool,
+ Intermediates: intermediateCertPool,
+ }
+ chains, err := cert.Verify(verifyOptions)
+ if len(chains) == 0 || err != nil {
+ return nil, fmt.Errorf("cert cannot be verified by provided roots and intermediates")
+ }
+ return chains, nil
+}
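+
+// A hypothetical usage sketch (not part of the upstream file): verifying a
+// parsed leaf certificate against a single root CA with no intermediates:
+//
+//	roots := x509.NewCertPool()
+//	roots.AddCert(rootCert) // rootCert: a parsed *x509.Certificate
+//	chains, err := VerifyCertificateTrust(leafCert, roots, x509.NewCertPool())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = chains // at least one verified chain on success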
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go
new file mode 100644
index 00000000000..52373aa75f5
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go
@@ -0,0 +1,227 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found at https://golang.org/LICENSE.
+
+// this is a modified version of path.Match that removes handling of path separators
+
+package in_toto
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// errBadPattern indicates a pattern was malformed.
+var errBadPattern = errors.New("syntax error in pattern")
+
+// match reports whether name matches the shell pattern.
+// The pattern syntax is:
+//
+// pattern:
+// { term }
+// term:
+// '*' matches any sequence of characters (including '/')
+// '?' matches any single character (including '/')
+// '[' [ '^' ] { character-range } ']'
+// character class (must be non-empty)
+// c matches character c (c != '*', '?', '\\', '[')
+// '\\' c matches character c
+//
+// character-range:
+// c matches character c (c != '\\', '-', ']')
+// '\\' c matches character c
+// lo '-' hi matches character c for lo <= c <= hi
+//
+// Match requires pattern to match all of name, not just a substring.
+// The only possible returned error is errBadPattern, when pattern
+// is malformed.
+func match(pattern, name string) (matched bool, err error) {
+Pattern:
+ for len(pattern) > 0 {
+ var star bool
+ var chunk string
+ star, chunk, pattern = scanChunk(pattern)
+ if star && chunk == "" {
+ // Trailing * matches everything
+ return true, nil
+ }
+ // Look for match at current position.
+ t, ok, err := matchChunk(chunk, name)
+ // if we're the last chunk, make sure we've exhausted the name
+ // otherwise we'll give a false result even if we could still match
+ // using the star
+ if ok && (len(t) == 0 || len(pattern) > 0) {
+ name = t
+ continue
+ }
+ if err != nil {
+ return false, err
+ }
+ if star {
+ // Look for match skipping i+1 bytes.
+ for i := 0; i < len(name); i++ {
+ t, ok, err := matchChunk(chunk, name[i+1:])
+ if ok {
+ // if we're the last chunk, make sure we exhausted the name
+ if len(pattern) == 0 && len(t) > 0 {
+ continue
+ }
+ name = t
+ continue Pattern
+ }
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ // Before returning false with no error,
+ // check that the remainder of the pattern is syntactically valid.
+ for len(pattern) > 0 {
+ _, chunk, pattern = scanChunk(pattern)
+ if _, _, err := matchChunk(chunk, ""); err != nil {
+ return false, err
+ }
+ }
+ return false, nil
+ }
+ return len(name) == 0, nil
+}
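+
+// Illustrative behavior (hypothetical examples, not upstream tests):
+//
+//	match("foo*", "foobar")  // true, nil: trailing '*' matches the rest
+//	match("foo?", "food")    // true, nil: '?' matches a single character
+//	match("[a-c]at", "bat")  // true, nil: character range
+//	match("foo", "foobar")   // false, nil: pattern must match all of name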
+
+// scanChunk gets the next segment of pattern, which is a non-star string
+// possibly preceded by a star.
+func scanChunk(pattern string) (star bool, chunk, rest string) {
+ for len(pattern) > 0 && pattern[0] == '*' {
+ pattern = pattern[1:]
+ star = true
+ }
+ inrange := false
+ var i int
+Scan:
+ for i = 0; i < len(pattern); i++ {
+ switch pattern[i] {
+ case '\\':
+ // error check handled in matchChunk: bad pattern.
+ if i+1 < len(pattern) {
+ i++
+ }
+ case '[':
+ inrange = true
+ case ']':
+ inrange = false
+ case '*':
+ if !inrange {
+ break Scan
+ }
+ }
+ }
+ return star, pattern[0:i], pattern[i:]
+}
+
+// matchChunk checks whether chunk matches the beginning of s.
+// If so, it returns the remainder of s (after the match).
+// Chunk is all single-character operators: literals, char classes, and ?.
+func matchChunk(chunk, s string) (rest string, ok bool, err error) {
+ // failed records whether the match has failed.
+ // After the match fails, the loop continues on processing chunk,
+ // checking that the pattern is well-formed but no longer reading s.
+ failed := false
+ for len(chunk) > 0 {
+ if !failed && len(s) == 0 {
+ failed = true
+ }
+ switch chunk[0] {
+ case '[':
+ // character class
+ var r rune
+ if !failed {
+ var n int
+ r, n = utf8.DecodeRuneInString(s)
+ s = s[n:]
+ }
+ chunk = chunk[1:]
+ // possibly negated
+ negated := false
+ if len(chunk) > 0 && chunk[0] == '^' {
+ negated = true
+ chunk = chunk[1:]
+ }
+ // parse all ranges
+ match := false
+ nrange := 0
+ for {
+ if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
+ chunk = chunk[1:]
+ break
+ }
+ var lo, hi rune
+ if lo, chunk, err = getEsc(chunk); err != nil {
+ return "", false, err
+ }
+ hi = lo
+ if chunk[0] == '-' {
+ if hi, chunk, err = getEsc(chunk[1:]); err != nil {
+ return "", false, err
+ }
+ }
+ if lo <= r && r <= hi {
+ match = true
+ }
+ nrange++
+ }
+ if match == negated {
+ failed = true
+ }
+
+ case '?':
+ if !failed {
+ _, n := utf8.DecodeRuneInString(s)
+ s = s[n:]
+ }
+ chunk = chunk[1:]
+
+ case '\\':
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ return "", false, errBadPattern
+ }
+ fallthrough
+
+ default:
+ if !failed {
+ if chunk[0] != s[0] {
+ failed = true
+ }
+ s = s[1:]
+ }
+ chunk = chunk[1:]
+ }
+ }
+ if failed {
+ return "", false, nil
+ }
+ return s, true, nil
+}
+
+// getEsc gets a possibly-escaped character from chunk, for a character class.
+func getEsc(chunk string) (r rune, nchunk string, err error) {
+ if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
+ err = errBadPattern
+ return
+ }
+ if chunk[0] == '\\' {
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ err = errBadPattern
+ return
+ }
+ }
+ r, n := utf8.DecodeRuneInString(chunk)
+ if r == utf8.RuneError && n == 1 {
+ err = errBadPattern
+ }
+ nchunk = chunk[n:]
+ if len(nchunk) == 0 {
+ err = errBadPattern
+ }
+ return
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go
new file mode 100644
index 00000000000..f56b784ea0c
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go
@@ -0,0 +1,967 @@
+package in_toto
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/secure-systems-lab/go-securesystemslib/cjson"
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+)
+
+/*
+KeyVal contains the actual values of a key, as opposed to key metadata such as
+a key identifier or key type. For RSA keys, the key value is a pair of public
+and private keys in PEM format stored as strings. For public keys the Private
+field may be an empty string.
+*/
+type KeyVal struct {
+ Private string `json:"private,omitempty"`
+ Public string `json:"public"`
+ Certificate string `json:"certificate,omitempty"`
+}
+
+/*
+Key represents a generic in-toto key that contains key metadata, such as an
+identifier, supported hash algorithms to create the identifier, the key type
+and the supported signature scheme, and the actual key value.
+*/
+type Key struct {
+ KeyID string `json:"keyid"`
+ KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"`
+ KeyType string `json:"keytype"`
+ KeyVal KeyVal `json:"keyval"`
+ Scheme string `json:"scheme"`
+}
+
+// ErrEmptyKeyField is returned if a field in our Key struct is empty.
+var ErrEmptyKeyField = errors.New("empty field in key")
+
+// ErrInvalidHexString is returned if a string is not valid hexadecimal.
+var ErrInvalidHexString = errors.New("invalid hex string")
+
+// ErrSchemeKeyTypeMismatch is returned if the given scheme and key type are not supported together.
+var ErrSchemeKeyTypeMismatch = errors.New("the scheme and key type are not supported together")
+
+// ErrUnsupportedKeyIDHashAlgorithms is returned if the specified KeyIDHashAlgorithms is not supported.
+var ErrUnsupportedKeyIDHashAlgorithms = errors.New("the given keyID hash algorithm is not supported")
+
+// ErrKeyKeyTypeMismatch is returned if the specified keyType does not match the key.
+var ErrKeyKeyTypeMismatch = errors.New("the given key does not match its key type")
+
+// ErrNoPublicKey is returned when a key that should be public still carries a private key value.
+var ErrNoPublicKey = errors.New("the given key is not a public key")
+
+// ErrCurveSizeSchemeMismatch is returned when the scheme and curve size are incompatible,
+// for example: curve size = "521" and scheme = "ecdsa-sha2-nistp224".
+var ErrCurveSizeSchemeMismatch = errors.New("the scheme does not match the curve size")
+
+/*
+matchEcdsaScheme checks if the scheme suffix matches the ecdsa key
+curve size. We do not need a full regex match here, because
+our validateKey functions already check for a valid scheme string.
+*/
+func matchEcdsaScheme(curveSize int, scheme string) error {
+ if !strings.HasSuffix(scheme, strconv.Itoa(curveSize)) {
+ return ErrCurveSizeSchemeMismatch
+ }
+ return nil
+}
+
+/*
+validateHexString is used to validate that a string passed to it contains
+only valid hexadecimal characters.
+*/
+func validateHexString(str string) error {
+ formatCheck, _ := regexp.MatchString("^[a-fA-F0-9]+$", str)
+ if !formatCheck {
+ return fmt.Errorf("%w: %s", ErrInvalidHexString, str)
+ }
+ return nil
+}
+
+/*
+validateKeyVal validates the KeyVal struct. In case of an ed25519 key,
+it will check for a hex string for private and public key. In any other
+case, validateKeyVal will try to decode the PEM block. If this succeeds,
+we have a valid PEM block in our KeyVal struct. On success it will return nil;
+on failure it will return the corresponding error. This can be either
+an ErrInvalidHexString, an ErrNoPEMBlock or an ErrUnsupportedKeyType
+if the KeyType is unknown.
+*/
+func validateKeyVal(key Key) error {
+ switch key.KeyType {
+ case ed25519KeyType:
+ // We cannot use matchPublicKeyKeyType or matchPrivateKeyKeyType here,
+ // because the key is not retrieved from PEM. Hence we are dealing with
+ // plain ed25519 key bytes. These bytes can't be typechecked like in the
+ // matchPublicKeyKeyType/matchPrivateKeyKeyType functions.
+ err := validateHexString(key.KeyVal.Public)
+ if err != nil {
+ return err
+ }
+ if key.KeyVal.Private != "" {
+ err := validateHexString(key.KeyVal.Private)
+ if err != nil {
+ return err
+ }
+ }
+ case rsaKeyType, ecdsaKeyType:
+ // We do not need the pemData here, so we can throw it away via '_'
+ _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
+ if err != nil {
+ return err
+ }
+ err = matchPublicKeyKeyType(parsedKey, key.KeyType)
+ if err != nil {
+ return err
+ }
+ if key.KeyVal.Private != "" {
+ // We do not need the pemData here, so we can throw it away via '_'
+ _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
+ if err != nil {
+ return err
+ }
+ err = matchPrivateKeyKeyType(parsedKey, key.KeyType)
+ if err != nil {
+ return err
+ }
+ }
+ default:
+ return ErrUnsupportedKeyType
+ }
+ return nil
+}
+
+/*
+matchPublicKeyKeyType validates whether an interface can be asserted to
+the RSA or ECDSA public key type. We can only check RSA and ECDSA this way,
+because we are storing them in PEM format. Ed25519 keys are stored as plain
+ed25519 keys encoded as hex strings, thus we have no metadata for them.
+This function will return nil on success. If the key type does not match
+it will return an ErrKeyKeyTypeMismatch.
+*/
+func matchPublicKeyKeyType(key interface{}, keyType string) error {
+ switch key.(type) {
+ case *rsa.PublicKey:
+ if keyType != rsaKeyType {
+ return ErrKeyKeyTypeMismatch
+ }
+ case *ecdsa.PublicKey:
+ if keyType != ecdsaKeyType {
+ return ErrKeyKeyTypeMismatch
+ }
+ default:
+ return ErrInvalidKey
+ }
+ return nil
+}
+
+/*
+matchPrivateKeyKeyType validates whether an interface can be asserted to
+the RSA or ECDSA private key type. We can only check RSA and ECDSA this way,
+because we are storing them in PEM format. Ed25519 keys are stored as plain
+ed25519 keys encoded as hex strings, thus we have no metadata for them.
+This function will return nil on success. If the key type does not match
+it will return an ErrKeyKeyTypeMismatch.
+*/
+func matchPrivateKeyKeyType(key interface{}, keyType string) error {
+ // we can only check RSA and ECDSA this way, because we are storing them in PEM
+ // format. ed25519 keys are stored as plain ed25519 keys encoded as hex strings
+ // so we have no metadata for them.
+ switch key.(type) {
+ case *rsa.PrivateKey:
+ if keyType != rsaKeyType {
+ return ErrKeyKeyTypeMismatch
+ }
+ case *ecdsa.PrivateKey:
+ if keyType != ecdsaKeyType {
+ return ErrKeyKeyTypeMismatch
+ }
+ default:
+ return ErrInvalidKey
+ }
+ return nil
+}
+
+/*
+matchKeyTypeScheme checks if the specified scheme matches our specified
+keyType. If the keyType is not supported it will return an
+ErrUnsupportedKeyType. If the keyType and scheme do not match it will return
+an ErrSchemeKeyTypeMismatch. If the specified keyType and scheme are
+compatible matchKeyTypeScheme will return nil.
+*/
+func matchKeyTypeScheme(key Key) error {
+ switch key.KeyType {
+ case rsaKeyType:
+ for _, scheme := range getSupportedRSASchemes() {
+ if key.Scheme == scheme {
+ return nil
+ }
+ }
+ case ed25519KeyType:
+ for _, scheme := range getSupportedEd25519Schemes() {
+ if key.Scheme == scheme {
+ return nil
+ }
+ }
+ case ecdsaKeyType:
+ for _, scheme := range getSupportedEcdsaSchemes() {
+ if key.Scheme == scheme {
+ return nil
+ }
+ }
+ default:
+ return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, key.KeyType)
+ }
+ return ErrSchemeKeyTypeMismatch
+}
+
+/*
+validateKey checks the outer key object (everything except the KeyVal struct).
+It verifies that the keyID is a hex string and checks for empty fields.
+On success it will return nil; on error it will return the corresponding error,
+e.g. ErrEmptyKeyField, ErrInvalidHexString, ErrSchemeKeyTypeMismatch or
+ErrUnsupportedKeyIDHashAlgorithms.
+*/
+func validateKey(key Key) error {
+ err := validateHexString(key.KeyID)
+ if err != nil {
+ return err
+ }
+ // This could probably be done more elegantly with reflection,
+ // but we care about performance here.
+ if key.KeyType == "" {
+ return fmt.Errorf("%w: keytype", ErrEmptyKeyField)
+ }
+ if key.KeyVal.Public == "" && key.KeyVal.Certificate == "" {
+ return fmt.Errorf("%w: keyval.public and keyval.certificate cannot both be blank", ErrEmptyKeyField)
+ }
+ if key.Scheme == "" {
+ return fmt.Errorf("%w: scheme", ErrEmptyKeyField)
+ }
+ err = matchKeyTypeScheme(key)
+ if err != nil {
+ return err
+ }
+ // only check for supported KeyIDHashAlgorithms, if the variable has been set
+ if key.KeyIDHashAlgorithms != nil {
+ supportedKeyIDHashAlgorithms := getSupportedKeyIDHashAlgorithms()
+ if !supportedKeyIDHashAlgorithms.IsSubSet(NewSet(key.KeyIDHashAlgorithms...)) {
+ return fmt.Errorf("%w: %#v, supported are: %#v", ErrUnsupportedKeyIDHashAlgorithms, key.KeyIDHashAlgorithms, getSupportedKeyIDHashAlgorithms())
+ }
+ }
+ return nil
+}
+
+/*
+validatePublicKey is a wrapper around validateKey. It tests that the private key
+value in the key is empty and then validates the key by calling validateKey.
+On success it will return nil; on error it will return ErrNoPublicKey or the
+error from validateKey.
+*/
+func validatePublicKey(key Key) error {
+ if key.KeyVal.Private != "" {
+ return ErrNoPublicKey
+ }
+ err := validateKey(key)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+Signature represents a generic in-toto signature that contains the identifier
+of the Key, which was used to create the signature and the signature data. The
+used signature scheme is found in the corresponding Key.
+*/
+type Signature struct {
+ KeyID string `json:"keyid"`
+ Sig string `json:"sig"`
+ Certificate string `json:"cert,omitempty"`
+}
+
+// GetCertificate returns the parsed x509 certificate attached to the signature,
+// if it exists.
+func (sig Signature) GetCertificate() (Key, error) {
+ key := Key{}
+ if len(sig.Certificate) == 0 {
+ return key, errors.New("Signature has empty Certificate")
+ }
+
+ err := key.LoadKeyReaderDefaults(strings.NewReader(sig.Certificate))
+ return key, err
+}
+
+/*
+validateSignature is a function used to check if a passed signature is valid,
+by inspecting the key ID and the signature itself.
+*/
+func validateSignature(signature Signature) error {
+ if err := validateHexString(signature.KeyID); err != nil {
+ return err
+ }
+ if err := validateHexString(signature.Sig); err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+validateSliceOfSignatures is a helper function used to validate multiple
+signatures stored in a slice.
+*/
+func validateSliceOfSignatures(slice []Signature) error {
+ for _, signature := range slice {
+ if err := validateSignature(signature); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+/*
+Link represents the evidence of a supply chain step performed by a functionary.
+It should be contained in a generic Metablock object, which provides
+functionality for signing and signature verification, and reading from and
+writing to disk.
+*/
+type Link struct {
+ Type string `json:"_type"`
+ Name string `json:"name"`
+ Materials map[string]interface{} `json:"materials"`
+ Products map[string]interface{} `json:"products"`
+ ByProducts map[string]interface{} `json:"byproducts"`
+ Command []string `json:"command"`
+ Environment map[string]interface{} `json:"environment"`
+}
+
+/*
+validateArtifacts is a general function used to validate products and materials.
+*/
+func validateArtifacts(artifacts map[string]interface{}) error {
+ for artifactName, artifact := range artifacts {
+ artifactValue := reflect.ValueOf(artifact).MapRange()
+ for artifactValue.Next() {
+ value := artifactValue.Value().Interface().(string)
+ hashType := artifactValue.Key().Interface().(string)
+ if err := validateHexString(value); err != nil {
+ return fmt.Errorf("in artifact '%s', %s hash value: %s",
+ artifactName, hashType, err.Error())
+ }
+ }
+ }
+ return nil
+}
+
+/*
+validateLink is a function used to ensure that a passed item of type Link
+matches the necessary format.
+*/
+func validateLink(link Link) error {
+ if link.Type != "link" {
+ return fmt.Errorf("invalid type for link '%s': should be 'link'",
+ link.Name)
+ }
+
+ if err := validateArtifacts(link.Materials); err != nil {
+ return fmt.Errorf("in materials of link '%s': %s", link.Name,
+ err.Error())
+ }
+
+ if err := validateArtifacts(link.Products); err != nil {
+ return fmt.Errorf("in products of link '%s': %s", link.Name,
+ err.Error())
+ }
+
+ return nil
+}
+
+/*
+LinkNameFormat represents a format string used to create the filename for a
+signed Link (wrapped in a Metablock). It consists of the name of the link and
+the first 8 characters of the signing key id. E.g.:
+
+ fmt.Sprintf(LinkNameFormat, "package",
+ "2f89b9272acfc8f4a0a0f094d789fdb0ba798b0fe41f2f5f417c12f0085ff498")
+ // returns "package.2f89b9272.link"
+*/
+const LinkNameFormat = "%s.%.8s.link"
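+
+// PreliminaryLinkNameFormat is the corresponding format for link metadata
+// files of steps that are still in progress ("unfinished" links).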
+const PreliminaryLinkNameFormat = ".%s.%.8s.link-unfinished"
+
+/*
+LinkNameFormatShort is for links that are not signed, e.g.:
+
+ fmt.Sprintf(LinkNameFormatShort, "unsigned")
+ // returns "unsigned.link"
+*/
+const LinkNameFormatShort = "%s.link"
+const LinkGlobFormat = "%s.????????.link"
+
+/*
+SublayoutLinkDirFormat represents the format of the name of the directory for
+sublayout links during the verification workflow.
+*/
+const SublayoutLinkDirFormat = "%s.%.8s"
+
+/*
+SupplyChainItem summarizes common fields of the two available supply chain
+item types, Inspection and Step.
+*/
+type SupplyChainItem struct {
+ Name string `json:"name"`
+ ExpectedMaterials [][]string `json:"expected_materials"`
+ ExpectedProducts [][]string `json:"expected_products"`
+}
+
+/*
+validateArtifactRule calls UnpackRule to validate that the passed rule conforms
+with any of the available rule formats.
+*/
+func validateArtifactRule(rule []string) error {
+ if _, err := UnpackRule(rule); err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+validateSliceOfArtifactRules iterates over passed rules to validate them.
+*/
+func validateSliceOfArtifactRules(rules [][]string) error {
+ for _, rule := range rules {
+ if err := validateArtifactRule(rule); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+/*
+validateSupplyChainItem is used to validate the common elements found in both
+steps and inspections. Here, the function primarily ensures that the name of
+a supply chain item isn't empty.
+*/
+func validateSupplyChainItem(item SupplyChainItem) error {
+ if item.Name == "" {
+ return fmt.Errorf("name cannot be empty")
+ }
+
+ if err := validateSliceOfArtifactRules(item.ExpectedMaterials); err != nil {
+ return fmt.Errorf("invalid material rule: %s", err)
+ }
+ if err := validateSliceOfArtifactRules(item.ExpectedProducts); err != nil {
+ return fmt.Errorf("invalid product rule: %s", err)
+ }
+ return nil
+}
+
+/*
+Inspection represents an in-toto supply chain inspection, whose command in the
+Run field is executed during final product verification, generating unsigned
+link metadata. Materials and products used/produced by the inspection are
+constrained by the artifact rules in the inspection's ExpectedMaterials and
+ExpectedProducts fields.
+*/
+type Inspection struct {
+ Type string `json:"_type"`
+ Run []string `json:"run"`
+ SupplyChainItem
+}
+
+/*
+validateInspection ensures that a passed inspection is valid and matches the
+necessary format of an inspection.
+*/
+func validateInspection(inspection Inspection) error {
+ if err := validateSupplyChainItem(inspection.SupplyChainItem); err != nil {
+ return fmt.Errorf("inspection %s", err.Error())
+ }
+ if inspection.Type != "inspection" {
+ return fmt.Errorf("invalid Type value for inspection '%s': should be "+
+ "'inspection'", inspection.SupplyChainItem.Name)
+ }
+ return nil
+}
+
+/*
+Step represents an in-toto step of the supply chain performed by a functionary.
+During final product verification in-toto looks for corresponding Link
+metadata, which is used as signed evidence that the step was performed
+according to the supply chain definition. Materials and products used/produced
+by the step are constrained by the artifact rules in the step's
+ExpectedMaterials and ExpectedProducts fields.
+*/
+type Step struct {
+ Type string `json:"_type"`
+ PubKeys []string `json:"pubkeys"`
+ CertificateConstraints []CertificateConstraint `json:"cert_constraints,omitempty"`
+ ExpectedCommand []string `json:"expected_command"`
+ Threshold int `json:"threshold"`
+ SupplyChainItem
+}
+
+// CheckCertConstraints returns nil if the provided certificate matches at least
+// one of the constraints for this step, and an error otherwise.
+func (s Step) CheckCertConstraints(key Key, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error {
+ if len(s.CertificateConstraints) == 0 {
+ return fmt.Errorf("no constraints found")
+ }
+
+ _, possibleCert, err := decodeAndParse([]byte(key.KeyVal.Certificate))
+ if err != nil {
+ return err
+ }
+
+ cert, ok := possibleCert.(*x509.Certificate)
+ if !ok {
+ return fmt.Errorf("not a valid certificate")
+ }
+
+ for _, constraint := range s.CertificateConstraints {
+ err = constraint.Check(cert, rootCAIDs, rootCertPool, intermediateCertPool)
+ if err == nil {
+ return nil
+ }
+ }
+ if err != nil {
+ return err
+ }
+
+ // this should not be reachable since there is at least one constraint, and the for loop only saw err != nil
+ return fmt.Errorf("unknown certificate constraint error")
+}
+
+/*
+validateStep ensures that a passed step is valid and matches the
+necessary format of a step.
+*/
+func validateStep(step Step) error {
+ if err := validateSupplyChainItem(step.SupplyChainItem); err != nil {
+ return fmt.Errorf("step %s", err.Error())
+ }
+ if step.Type != "step" {
+ return fmt.Errorf("invalid Type value for step '%s': should be 'step'",
+ step.SupplyChainItem.Name)
+ }
+ for _, keyID := range step.PubKeys {
+ if err := validateHexString(keyID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+/*
+ISO8601DateSchema defines the format string of a timestamp following the
+ISO 8601 standard.
+*/
+const ISO8601DateSchema = "2006-01-02T15:04:05Z"
+
+/*
+Layout represents the definition of a software supply chain. It lists the
+sequence of steps required in the software supply chain and the functionaries
+authorized to perform these steps. Functionaries are identified by their
+public keys. In addition, the layout may list a sequence of inspections that
+are executed during in-toto supply chain verification. A layout should be
+contained in a generic Metablock object, which provides functionality for
+signing and signature verification, and reading from and writing to disk.
+*/
+type Layout struct {
+ Type string `json:"_type"`
+ Steps []Step `json:"steps"`
+ Inspect []Inspection `json:"inspect"`
+ Keys map[string]Key `json:"keys"`
+ RootCas map[string]Key `json:"rootcas,omitempty"`
+ IntermediateCas map[string]Key `json:"intermediatecas,omitempty"`
+ Expires string `json:"expires"`
+ Readme string `json:"readme"`
+}
+
+// Go does not allow passing `[]T` (a slice of a concrete type) to a function
+// that accepts `[]interface{}` (a slice of the empty interface type).
+// We have to manually create the interface slice first, see
+// https://golang.org/doc/faq#convert_slice_of_interface
+// TODO: Is there a better way to do polymorphism for steps and inspections?
+func (l *Layout) stepsAsInterfaceSlice() []interface{} {
+ stepsI := make([]interface{}, len(l.Steps))
+ for i, v := range l.Steps {
+ stepsI[i] = v
+ }
+ return stepsI
+}
+func (l *Layout) inspectAsInterfaceSlice() []interface{} {
+ inspectionsI := make([]interface{}, len(l.Inspect))
+ for i, v := range l.Inspect {
+ inspectionsI[i] = v
+ }
+ return inspectionsI
+}
+
+// RootCAIDs returns a slice of all of the Root CA IDs
+func (l *Layout) RootCAIDs() []string {
+ rootCAIDs := make([]string, 0, len(l.RootCas))
+ for rootCAID := range l.RootCas {
+ rootCAIDs = append(rootCAIDs, rootCAID)
+ }
+ return rootCAIDs
+}
+
+func validateLayoutKeys(keys map[string]Key) error {
+ for keyID, key := range keys {
+ if key.KeyID != keyID {
+ return fmt.Errorf("invalid key found")
+ }
+ err := validatePublicKey(key)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+/*
+validateLayout is a function used to ensure that a passed item of type Layout
+matches the necessary format.
+*/
+func validateLayout(layout Layout) error {
+ if layout.Type != "layout" {
+ return fmt.Errorf("invalid Type value for layout: should be 'layout'")
+ }
+
+ if _, err := time.Parse(ISO8601DateSchema, layout.Expires); err != nil {
+ return fmt.Errorf("expiry time parsed incorrectly - date either" +
+ " invalid or of incorrect format")
+ }
+
+ if err := validateLayoutKeys(layout.Keys); err != nil {
+ return err
+ }
+
+ if err := validateLayoutKeys(layout.RootCas); err != nil {
+ return err
+ }
+
+ if err := validateLayoutKeys(layout.IntermediateCas); err != nil {
+ return err
+ }
+
+ var namesSeen = make(map[string]bool)
+ for _, step := range layout.Steps {
+ if namesSeen[step.Name] {
+ return fmt.Errorf("non unique step or inspection name found")
+ }
+
+ namesSeen[step.Name] = true
+
+ if err := validateStep(step); err != nil {
+ return err
+ }
+ }
+ for _, inspection := range layout.Inspect {
+ if namesSeen[inspection.Name] {
+ return fmt.Errorf("non unique step or inspection name found")
+ }
+
+ namesSeen[inspection.Name] = true
+ }
+ return nil
+}
+
+type Metadata interface {
+ Sign(Key) error
+ VerifySignature(Key) error
+ GetPayload() any
+ Sigs() []Signature
+ GetSignatureForKeyID(string) (Signature, error)
+ Dump(string) error
+}
+
+func LoadMetadata(path string) (Metadata, error) {
+ jsonBytes, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var rawData map[string]*json.RawMessage
+ if err := json.Unmarshal(jsonBytes, &rawData); err != nil {
+ return nil, err
+ }
+
+ if _, ok := rawData["payloadType"]; ok {
+ dsseEnv := &dsse.Envelope{}
+ if rawData["payload"] == nil || rawData["signatures"] == nil {
+ return nil, fmt.Errorf("in-toto metadata envelope requires 'payload' and 'signatures' parts")
+ }
+
+ if err := json.Unmarshal(jsonBytes, dsseEnv); err != nil {
+ return nil, err
+ }
+
+ if dsseEnv.PayloadType != PayloadType {
+ return nil, ErrInvalidPayloadType
+ }
+
+ return loadEnvelope(dsseEnv)
+ }
+
+ mb := &Metablock{}
+
+ // Error out on missing `signed` or `signatures` field or if
+ // one of them has a `null` value, which would lead to a nil pointer
+ // dereference in Unmarshal below.
+ if rawData["signed"] == nil || rawData["signatures"] == nil {
+ return nil, fmt.Errorf("in-toto metadata requires 'signed' and 'signatures' parts")
+ }
+
+ // Fully unmarshal signatures part
+ if err := json.Unmarshal(*rawData["signatures"], &mb.Signatures); err != nil {
+ return nil, err
+ }
+
+ payload, err := loadPayload(*rawData["signed"])
+ if err != nil {
+ return nil, err
+ }
+
+ mb.Signed = payload
+
+ return mb, nil
+}
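+
+// A hypothetical usage sketch (not part of the upstream file): loading either
+// a DSSE envelope or a classic Metablock from disk ("root.layout" is a
+// placeholder path) and verifying one signature:
+//
+//	md, err := LoadMetadata("root.layout")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := md.VerifySignature(pubKey); err != nil { // pubKey: a loaded Key
+//		log.Fatal(err)
+//	}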
+
+/*
+Metablock is a generic container for signable in-toto objects such as Layout
+or Link. It has two fields, one that contains the signable object and one that
+contains corresponding signatures. Metablock also provides functionality for
+signing and signature verification, and reading from and writing to disk.
+*/
+type Metablock struct {
+ // NOTE: Whenever we want to access an attribute of `Signed` we have to
+ // perform type assertion, e.g. `metablock.Signed.(Layout).Keys`
+ // Maybe there is a better way to store either Layouts or Links in `Signed`?
+ // The notary folks seem to have separate container structs:
+ // https://github.com/theupdateframework/notary/blob/master/tuf/data/root.go#L10-L14
+ // https://github.com/theupdateframework/notary/blob/master/tuf/data/targets.go#L13-L17
+ // I implemented it this way, because there will be several functions that
+ // receive or return a Metablock, where the type of Signed has to be inferred
+ // on runtime, e.g. when iterating over links for a layout, and a link can
+ // turn out to be a layout (sublayout)
+ Signed interface{} `json:"signed"`
+ Signatures []Signature `json:"signatures"`
+}
+
+type jsonField struct {
+ name string
+ omitempty bool
+}
+
+/*
+checkRequiredJSONFields checks that the passed map (obj) has keys for each of
+the json tags in the passed struct type (typ), and returns an error otherwise.
+Any json tags that contain the "omitempty" option are treated as optional.
+*/
+func checkRequiredJSONFields(obj map[string]interface{},
+ typ reflect.Type) error {
+
+ // Create list of json tags, e.g. `json:"_type"`
+ attributeCount := typ.NumField()
+ allFields := make([]jsonField, 0)
+ for i := 0; i < attributeCount; i++ {
+ fieldStr := typ.Field(i).Tag.Get("json")
+ field := jsonField{
+ name: fieldStr,
+ omitempty: false,
+ }
+
+ if idx := strings.Index(fieldStr, ","); idx != -1 {
+ field.name = fieldStr[:idx]
+ field.omitempty = strings.Contains(fieldStr[idx+1:], "omitempty")
+ }
+
+ allFields = append(allFields, field)
+ }
+
+ // Assert that there's a key in the passed map for each tag
+ for _, field := range allFields {
+ if _, ok := obj[field.name]; !ok && !field.omitempty {
+ return fmt.Errorf("required field %s missing", field.name)
+ }
+ }
+ return nil
+}
+
+/*
+Load parses JSON formatted metadata at the passed path into the Metablock
+object on which it was called. It returns an error if it cannot parse
+a valid JSON formatted Metablock that contains a Link or Layout.
+
+Deprecated: Use LoadMetadata for a signature wrapper agnostic way to load an
+envelope.
+*/
+func (mb *Metablock) Load(path string) error {
+ // Read entire file
+ jsonBytes, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ // Unmarshal JSON into a map of raw messages (signed and signatures)
+ // We can't fully unmarshal immediately, because we need to inspect the
+ // type (link or layout) to decide which data structure to use
+ var rawMb map[string]*json.RawMessage
+ if err := json.Unmarshal(jsonBytes, &rawMb); err != nil {
+ return err
+ }
+
+ // Error out on missing `signed` or `signatures` field or if
+ // one of them has a `null` value, which would lead to a nil pointer
+ // dereference in Unmarshal below.
+ if rawMb["signed"] == nil || rawMb["signatures"] == nil {
+ return fmt.Errorf("in-toto metadata requires 'signed' and" +
+ " 'signatures' parts")
+ }
+
+ // Fully unmarshal signatures part
+ if err := json.Unmarshal(*rawMb["signatures"], &mb.Signatures); err != nil {
+ return err
+ }
+
+ payload, err := loadPayload(*rawMb["signed"])
+ if err != nil {
+ return err
+ }
+
+ mb.Signed = payload
+
+ return nil
+}
+
+/*
+Dump JSON serializes and writes the Metablock on which it was called to the
+passed path. It returns an error if JSON serialization or writing fails.
+*/
+func (mb *Metablock) Dump(path string) error {
+ // JSON encode Metablock formatted with newlines and indentation
+ // TODO: parametrize format
+ jsonBytes, err := json.MarshalIndent(mb, "", " ")
+ if err != nil {
+ return err
+ }
+
+ // Write JSON bytes to the passed path with permissions (-rw-r--r--)
+ err = os.WriteFile(path, jsonBytes, 0644)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+/*
+GetSignableRepresentation returns the canonical JSON representation of the
+Signed field of the Metablock on which it was called. If canonicalization
+fails the first return value is nil and the second return value is the error.
+*/
+func (mb *Metablock) GetSignableRepresentation() ([]byte, error) {
+ return cjson.EncodeCanonical(mb.Signed)
+}
+
+func (mb *Metablock) GetPayload() any {
+ return mb.Signed
+}
+
+func (mb *Metablock) Sigs() []Signature {
+ return mb.Signatures
+}
+
+/*
+VerifySignature verifies the first signature, corresponding to the passed Key,
+that it finds in the Signatures field of the Metablock on which it was called.
+It returns an error if Signatures does not contain a Signature corresponding to
+the passed Key, the object in Signed cannot be canonicalized, or the Signature
+is invalid.
+*/
+func (mb *Metablock) VerifySignature(key Key) error {
+ sig, err := mb.GetSignatureForKeyID(key.KeyID)
+ if err != nil {
+ return err
+ }
+
+ dataCanonical, err := mb.GetSignableRepresentation()
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(key, sig, dataCanonical); err != nil {
+ return err
+ }
+ return nil
+}
+
+// GetSignatureForKeyID returns the signature that was created by the provided keyID, if it exists.
+func (mb *Metablock) GetSignatureForKeyID(keyID string) (Signature, error) {
+ for _, s := range mb.Signatures {
+ if s.KeyID == keyID {
+ return s, nil
+ }
+ }
+
+ return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID)
+}
+
+/*
+ValidateMetablock ensures that a passed Metablock object is valid. It indirectly
+validates the Link or Layout that the Metablock object contains.
+*/
+func ValidateMetablock(mb Metablock) error {
+ switch mbSignedType := mb.Signed.(type) {
+ case Layout:
+ if err := validateLayout(mb.Signed.(Layout)); err != nil {
+ return err
+ }
+ case Link:
+ if err := validateLink(mb.Signed.(Link)); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unknown type '%s', should be 'layout' or 'link'",
+ mbSignedType)
+ }
+
+ if err := validateSliceOfSignatures(mb.Signatures); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+/*
+Sign creates a signature over the signed portion of the metablock using the Key
+object provided. It then appends the resulting signature to the Signatures
+field. It returns an error if the Signed object cannot be canonicalized,
+or if the key is invalid or not supported.
+*/
+func (mb *Metablock) Sign(key Key) error {
+
+ dataCanonical, err := mb.GetSignableRepresentation()
+ if err != nil {
+ return err
+ }
+
+ newSignature, err := GenerateSignature(dataCanonical, key)
+ if err != nil {
+ return err
+ }
+
+ mb.Signatures = append(mb.Signatures, newSignature)
+ return nil
+}
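+
+// A hypothetical sign-and-dump sketch (not part of the upstream file); layout
+// is assumed to be a populated Layout and privKey a loaded private Key:
+//
+//	mb := Metablock{Signed: layout}
+//	if err := mb.Sign(privKey); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := mb.Dump("root.layout"); err != nil { // placeholder output path
+//		log.Fatal(err)
+//	}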
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go
new file mode 100644
index 00000000000..1bba77c39e5
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go
@@ -0,0 +1,131 @@
+package in_toto
+
+import (
+ "fmt"
+ "strings"
+)
+
+// An error message issued in UnpackRule if it receives a malformed rule.
+var errorMsg = "Wrong rule format, available formats are:\n" +
+ "\tMATCH [IN ] WITH (MATERIALS|PRODUCTS)" +
+ " [IN ] FROM ,\n" +
+ "\tCREATE ,\n" +
+ "\tDELETE ,\n" +
+ "\tMODIFY ,\n" +
+ "\tALLOW ,\n" +
+ "\tDISALLOW ,\n" +
+ "\tREQUIRE \n\n"
+
+/*
+UnpackRule parses the passed rule and extracts and returns the information
+required for rule processing. It can be used to verify if a rule has a valid
+format. Available rule formats are:
+
+ MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
+ [IN <destination-path-prefix>] FROM <step>,
+ CREATE <pattern>,
+ DELETE <pattern>,
+ MODIFY <pattern>,
+ ALLOW <pattern>,
+ DISALLOW <pattern>,
+ REQUIRE <filename>
+
+Rule tokens are normalized to lower case before returning. The returned map
+has the following format:
+
+ {
+ "type": "match" | "create" | "delete" | "modify" | "allow" | "disallow" | "require",
+ "pattern": "<file name pattern>",
+ "srcPrefix": "<path or empty string>", // MATCH rule only
+ "dstPrefix": "<path or empty string>", // MATCH rule only
+ "dstType": "materials" | "products", // MATCH rule only
+ "dstName": "<step name>", // MATCH rule only
+ }
+
+If the rule does not match any of the available formats the first return value
+is nil and the second return value is the error.
+*/
+func UnpackRule(rule []string) (map[string]string, error) {
+ // Cache rule len
+ ruleLen := len(rule)
+
+	// Create an all-lowercase copy of the rule to case-insensitively parse
+	// out tokens whose position we don't know yet. We keep the original rule
+	// to retain the case of the non-token elements.
+ ruleLower := make([]string, ruleLen)
+ for i, val := range rule {
+ ruleLower[i] = strings.ToLower(val)
+ }
+
+ switch ruleLower[0] {
+ case "create", "modify", "delete", "allow", "disallow", "require":
+ if ruleLen != 2 {
+ return nil,
+ fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+ }
+
+ return map[string]string{
+ "type": ruleLower[0],
+ "pattern": rule[1],
+ }, nil
+
+ case "match":
+ var srcPrefix string
+ var dstType string
+ var dstPrefix string
+ var dstName string
+
+		// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
+		// IN <destination-path-prefix> FROM <step>
+ if ruleLen == 10 && ruleLower[2] == "in" &&
+ ruleLower[4] == "with" && ruleLower[6] == "in" &&
+ ruleLower[8] == "from" {
+ srcPrefix = rule[3]
+ dstType = ruleLower[5]
+ dstPrefix = rule[7]
+ dstName = rule[9]
+			// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
+			// FROM <step>
+ } else if ruleLen == 8 && ruleLower[2] == "in" &&
+ ruleLower[4] == "with" && ruleLower[6] == "from" {
+ srcPrefix = rule[3]
+ dstType = ruleLower[5]
+ dstPrefix = ""
+ dstName = rule[7]
+
+			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) IN <destination-path-prefix>
+			// FROM <step>
+ } else if ruleLen == 8 && ruleLower[2] == "with" &&
+ ruleLower[4] == "in" && ruleLower[6] == "from" {
+ srcPrefix = ""
+ dstType = ruleLower[3]
+ dstPrefix = rule[5]
+ dstName = rule[7]
+
+			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) FROM <step>
+ } else if ruleLen == 6 && ruleLower[2] == "with" &&
+ ruleLower[4] == "from" {
+ srcPrefix = ""
+ dstType = ruleLower[3]
+ dstPrefix = ""
+ dstName = rule[5]
+
+ } else {
+ return nil,
+ fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+
+ }
+
+ return map[string]string{
+ "type": ruleLower[0],
+ "pattern": rule[1],
+ "srcPrefix": srcPrefix,
+ "dstPrefix": dstPrefix,
+ "dstType": dstType,
+ "dstName": dstName,
+ }, nil
+
+ default:
+ return nil,
+ fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+ }
+}
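+
+// exampleUnpackMatchRule is a minimal illustrative sketch (an editor's
+// addition, not upstream in-toto code) showing UnpackRule applied to the
+// 6-token MATCH form that omits both optional IN prefixes; the pattern and
+// step name are hypothetical.
+func exampleUnpackMatchRule() (map[string]string, error) {
+	// Expected result, per the parsing above:
+	//	{"type": "match", "pattern": "foo.tar.gz", "srcPrefix": "",
+	//	 "dstPrefix": "", "dstType": "products", "dstName": "package"}
+	return UnpackRule([]string{"MATCH", "foo.tar.gz", "WITH", "PRODUCTS", "FROM", "package"})
+}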
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go
new file mode 100644
index 00000000000..f0a55d82199
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go
@@ -0,0 +1,462 @@
+package in_toto
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "syscall"
+
+ "github.com/shibumi/go-pathspec"
+)
+
+// ErrSymCycle signals a detected symlink cycle in our RecordArtifacts() function.
+var ErrSymCycle = errors.New("symlink cycle detected")
+
+// ErrUnsupportedHashAlgorithm signals a missing hash mapping in getHashMapping
+var ErrUnsupportedHashAlgorithm = errors.New("unsupported hash algorithm detected")
+
+var ErrEmptyCommandArgs = errors.New("the command args are empty")
+
+// visitedSymlinks is a hashset that contains all paths that we have visited.
+var visitedSymlinks Set
+
+/*
+RecordArtifact reads and hashes the contents of the file at the passed path
+using the passed hash algorithms and returns a map in the following format:
+
+	{
+		"sha256": "<hex digest>",
+		...
+	}
+
+If reading the file fails, the first return value is nil and the second return
+value is the error.
+NOTE: If lineNormalization is true, Windows-style line separators (CRLF) are
+normalized to Unix-style line separators (LF) before hashing file contents,
+for cross-platform consistency.
+*/
+func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) {
+ supportedHashMappings := getHashMapping()
+ // Read file from passed path
+ contents, err := os.ReadFile(path)
+ hashedContentsMap := make(map[string]interface{})
+ if err != nil {
+ return nil, err
+ }
+
+ if lineNormalization {
+ // "Normalize" file contents. We convert all line separators to '\n'
+ // for keeping operating system independence
+ contents = bytes.ReplaceAll(contents, []byte("\r\n"), []byte("\n"))
+ contents = bytes.ReplaceAll(contents, []byte("\r"), []byte("\n"))
+ }
+
+ // Create a map of all the hashes present in the hash_func list
+ for _, element := range hashAlgorithms {
+ if _, ok := supportedHashMappings[element]; !ok {
+ return nil, fmt.Errorf("%w: %s", ErrUnsupportedHashAlgorithm, element)
+ }
+ h := supportedHashMappings[element]
+ result := fmt.Sprintf("%x", hashToHex(h(), contents))
+ hashedContentsMap[element] = result
+ }
+
+ // Return it in a format that is conformant with link metadata artifacts
+ return hashedContentsMap, nil
+}
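+
+// exampleRecordArtifact is a minimal sketch (an editor's addition, not
+// upstream code) of a typical RecordArtifact call; "foo.tar.gz" is a
+// hypothetical file path.
+func exampleRecordArtifact() (map[string]interface{}, error) {
+	// On success the result looks like:
+	//	{"sha256": "<hex digest of the (normalized) file contents>"}
+	return RecordArtifact("foo.tar.gz", []string{"sha256"}, true)
+}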
+
+/*
+RecordArtifacts is a wrapper around recordArtifacts.
+RecordArtifacts initializes a fresh set for storing visited symlinks and then
+delegates to recordArtifacts.
+recordArtifacts walks through the passed slice of paths, traversing
+subdirectories, and calls RecordArtifact for each file. It returns a map in
+the following format:
+
+	{
+		"<path>": {
+			"sha256": <hash>
+		},
+		"<path>": {
+			"sha256": <hash>
+		},
+		...
+	}
+
+If recording an artifact fails the first return value is nil and the second
+return value is the error.
+*/
+func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (evalArtifacts map[string]interface{}, err error) {
+ // Make sure to initialize a fresh hashset for every RecordArtifacts call
+ visitedSymlinks = NewSet()
+ evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+ // pass result and error through
+ return evalArtifacts, err
+}
+
+/*
+recordArtifacts walks through the passed slice of paths, traversing
+subdirectories, and calls RecordArtifact for each file. It returns a map in
+the following format:
+
+	{
+		"<path>": {
+			"sha256": <hash>
+		},
+		"<path>": {
+			"sha256": <hash>
+		},
+		...
+	}
+
+If recording an artifact fails the first return value is nil and the second
+return value is the error.
+*/
+func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (map[string]interface{}, error) {
+ artifacts := make(map[string]interface{})
+ for _, path := range paths {
+ err := filepath.Walk(path,
+ func(path string, info os.FileInfo, err error) error {
+ // Abort if Walk function has a problem,
+ // e.g. path does not exist
+ if err != nil {
+ return err
+ }
+			// We need to call pathspec.GitIgnore inside of our filepath.Walk, because otherwise
+			// we will not catch all paths. Just imagine a path like "." and a pattern like "*.pub".
+			// If we called pathspec.GitIgnore outside of filepath.Walk, such a pattern would not match.
+ ignore, err := pathspec.GitIgnore(gitignorePatterns, path)
+ if err != nil {
+ return err
+ }
+ if ignore {
+ return nil
+ }
+ // Don't hash directories
+ if info.IsDir() {
+ return nil
+ }
+
+			// check for symlink and evaluate the last element in a symlink
+			// chain via filepath.EvalSymlinks. We use EvalSymlinks here,
+			// because with os.Readlink() we would just read the next
+			// element in a possible symlink chain. This would mean more
+			// iterations. info.Mode()&os.ModeSymlink uses the file
+			// type bitmask to check for a symlink.
+ if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+ // return with error if we detect a symlink cycle
+ if ok := visitedSymlinks.Has(path); ok {
+ // this error will get passed through
+ // to RecordArtifacts()
+ return ErrSymCycle
+ }
+ evalSym, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return err
+ }
+ info, err := os.Stat(evalSym)
+ if err != nil {
+ return err
+ }
+ targetIsDir := false
+ if info.IsDir() {
+ if !followSymlinkDirs {
+ // We don't follow symlinked directories
+ return nil
+ }
+ targetIsDir = true
+ }
+ // add symlink to visitedSymlinks set
+ // this way, we know which link we have visited already
+ // if we visit a symlink twice, we have detected a symlink cycle
+ visitedSymlinks.Add(path)
+ // We recursively call recordArtifacts() to follow
+ // the new path.
+ evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+ if evalErr != nil {
+ return evalErr
+ }
+ for key, value := range evalArtifacts {
+ if targetIsDir {
+ symlinkPath := filepath.Join(path, strings.TrimPrefix(key, evalSym))
+ artifacts[symlinkPath] = value
+ } else {
+ artifacts[path] = value
+ }
+ }
+ return nil
+ }
+ artifact, err := RecordArtifact(path, hashAlgorithms, lineNormalization)
+ // Abort if artifact can't be recorded, e.g.
+ // due to file permissions
+ if err != nil {
+ return err
+ }
+
+ for _, strip := range lStripPaths {
+ if strings.HasPrefix(path, strip) {
+ path = strings.TrimPrefix(path, strip)
+ break
+ }
+ }
+ // Check if path is unique
+ if _, exists := artifacts[path]; exists {
+ return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path)
+ }
+ artifacts[path] = artifact
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return artifacts, nil
+}
+
+/*
+waitErrToExitCode converts an error returned by Cmd.Wait() to an exit code. It
+returns -1 if no exit code can be inferred.
+*/
+func waitErrToExitCode(err error) int {
+ // If there's no exit code, we return -1
+ retVal := -1
+
+ // See https://stackoverflow.com/questions/10385551/get-exit-code-go
+ if err != nil {
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ // The program has exited with an exit code != 0
+ // This works on both Unix and Windows. Although package
+ // syscall is generally platform dependent, WaitStatus is
+ // defined for both Unix and Windows and in both cases has
+ // an ExitStatus() method with the same signature.
+ if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ retVal = status.ExitStatus()
+ }
+ }
+ } else {
+ retVal = 0
+ }
+
+ return retVal
+}
+
+/*
+RunCommand executes the passed command in a subprocess. The first element of
+cmdArgs is used as executable and the rest as command arguments. It captures
+and returns stdout, stderr and exit code. The format of the returned map is:
+
+	{
+		"return-value": <exit code>,
+		"stdout": "<standard output>",
+		"stderr": "<standard error>"
+	}
+
+If the command cannot be executed or no pipes for stdout or stderr can be
+created the first return value is nil and the second return value is the error.
+NOTE: Since stdout and stderr are captured, they cannot be seen during the
+command execution.
+*/
+func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) {
+ if len(cmdArgs) == 0 {
+ return nil, ErrEmptyCommandArgs
+ }
+
+ cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
+
+ if runDir != "" {
+ cmd.Dir = runDir
+ }
+
+ stderrPipe, err := cmd.StderrPipe()
+ if err != nil {
+ return nil, err
+ }
+ stdoutPipe, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+
+ // TODO: duplicate stdout, stderr
+ stdout, _ := io.ReadAll(stdoutPipe)
+ stderr, _ := io.ReadAll(stderrPipe)
+
+ retVal := waitErrToExitCode(cmd.Wait())
+
+ return map[string]interface{}{
+ "return-value": float64(retVal),
+ "stdout": string(stdout),
+ "stderr": string(stderr),
+ }, nil
+}
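+
+// exampleRunCommand is a minimal sketch (an editor's addition, not upstream
+// code) of a RunCommand invocation; the echo command and the empty runDir
+// (current directory) are illustrative assumptions.
+func exampleRunCommand() (map[string]interface{}, error) {
+	// Expected result:
+	//	{"return-value": float64(0), "stdout": "hello\n", "stderr": ""}
+	return RunCommand([]string{"echo", "hello"}, "")
+}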
+
+/*
+InTotoRun executes commands, e.g. for software supply chain steps or
+inspections of an in-toto layout, and creates and returns corresponding link
+metadata. Link metadata contains recorded products at the passed productPaths
+and materials at the passed materialPaths. Depending on useDSSE, the returned
+link is wrapped in a Metablock or a DSSE Envelope. If command execution or
+artifact recording fails, the first return value is nil and the second return
+value is the error.
+*/
+func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string, cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) {
+ materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+ if err != nil {
+ return nil, err
+ }
+
+ // make sure that we only run RunCommand if cmdArgs is not nil or empty
+ byProducts := map[string]interface{}{}
+ if len(cmdArgs) != 0 {
+ byProducts, err = RunCommand(cmdArgs, runDir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+ if err != nil {
+ return nil, err
+ }
+
+ link := Link{
+ Type: "link",
+ Name: name,
+ Materials: materials,
+ Products: products,
+ ByProducts: byProducts,
+ Command: cmdArgs,
+ Environment: map[string]interface{}{},
+ }
+
+ if useDSSE {
+ env := &Envelope{}
+ if err := env.SetPayload(link); err != nil {
+ return nil, err
+ }
+
+ if !reflect.ValueOf(key).IsZero() {
+ if err := env.Sign(key); err != nil {
+ return nil, err
+ }
+ }
+
+ return env, nil
+ }
+
+ linkMb := &Metablock{Signed: link, Signatures: []Signature{}}
+ if !reflect.ValueOf(key).IsZero() {
+ if err := linkMb.Sign(key); err != nil {
+ return nil, err
+ }
+ }
+
+ return linkMb, nil
+}
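+
+// exampleInTotoRun is a minimal sketch (an editor's addition, not upstream
+// code) of recording a supply chain step in one shot. The step name, paths
+// and command are hypothetical; the zero Key{} skips signing, and
+// useDSSE=false yields a Metablock.
+func exampleInTotoRun() (Metadata, error) {
+	return InTotoRun("package", ".", []string{"src/"}, []string{"foo.tar.gz"},
+		[]string{"tar", "czf", "foo.tar.gz", "src/"}, Key{}, []string{"sha256"},
+		nil, nil, true, false, false)
+}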
+
+/*
+InTotoRecordStart begins the creation of a link metablock file in two steps,
+in order to provide evidence for supply chain steps that cannot be carried out
+by a single command. InTotoRecordStart collects the hashes of the materials
+before any commands are run, signs the unfinished link, and returns the link.
+*/
+func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) {
+ materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+ if err != nil {
+ return nil, err
+ }
+
+ link := Link{
+ Type: "link",
+ Name: name,
+ Materials: materials,
+ Products: map[string]interface{}{},
+ ByProducts: map[string]interface{}{},
+ Command: []string{},
+ Environment: map[string]interface{}{},
+ }
+
+ if useDSSE {
+ env := &Envelope{}
+ if err := env.SetPayload(link); err != nil {
+ return nil, err
+ }
+
+ if !reflect.ValueOf(key).IsZero() {
+ if err := env.Sign(key); err != nil {
+ return nil, err
+ }
+ }
+
+ return env, nil
+ }
+
+ linkMb := &Metablock{Signed: link, Signatures: []Signature{}}
+ linkMb.Signatures = []Signature{}
+ if !reflect.ValueOf(key).IsZero() {
+ if err := linkMb.Sign(key); err != nil {
+ return nil, err
+ }
+ }
+
+ return linkMb, nil
+}
+
+/*
+InTotoRecordStop ends the creation of a metadata link file created by
+InTotoRecordStart. InTotoRecordStop takes in a signed unfinished link metablock
+created by InTotoRecordStart and records the hashes of any products created by
+commands run between InTotoRecordStart and InTotoRecordStop. The resultant
+finished link metablock is then signed by the provided key and returned.
+*/
+func InTotoRecordStop(prelimLinkEnv Metadata, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) {
+ if err := prelimLinkEnv.VerifySignature(key); err != nil {
+ return nil, err
+ }
+
+ link, ok := prelimLinkEnv.GetPayload().(Link)
+ if !ok {
+ return nil, errors.New("invalid metadata block")
+ }
+
+ products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs)
+ if err != nil {
+ return nil, err
+ }
+
+ link.Products = products
+
+ if useDSSE {
+ env := &Envelope{}
+ if err := env.SetPayload(link); err != nil {
+ return nil, err
+ }
+
+ if !reflect.ValueOf(key).IsZero() {
+ if err := env.Sign(key); err != nil {
+ return nil, err
+ }
+ }
+
+ return env, nil
+ }
+
+ linkMb := &Metablock{Signed: link, Signatures: []Signature{}}
+ if !reflect.ValueOf(key).IsZero() {
+ if err := linkMb.Sign(key); err != nil {
+ return linkMb, err
+ }
+ }
+
+ return linkMb, nil
+}
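+
+// exampleTwoPhaseRecord is a minimal sketch (an editor's addition, not
+// upstream code) of the start/stop flow for steps that span multiple
+// commands. The step name and paths are hypothetical; the same key must
+// sign the preliminary link and verify it in InTotoRecordStop.
+func exampleTwoPhaseRecord(key Key) (Metadata, error) {
+	prelim, err := InTotoRecordStart("build", []string{"src/"}, key,
+		[]string{"sha256"}, nil, nil, true, false, false)
+	if err != nil {
+		return nil, err
+	}
+	// ... run the build commands out of band ...
+	return InTotoRecordStop(prelim, []string{"out/"}, key,
+		[]string{"sha256"}, nil, nil, true, false, false)
+}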
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go
new file mode 100644
index 00000000000..a45a4546346
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go
@@ -0,0 +1,16 @@
+package common
+
+// DigestSet contains a set of digests. It is represented as a map from
+// algorithm name to lowercase hex-encoded value.
+type DigestSet map[string]string
+
+// ProvenanceBuilder identifies the entity that executed the build steps.
+type ProvenanceBuilder struct {
+ ID string `json:"id"`
+}
+
+// ProvenanceMaterial defines the materials used to build an artifact.
+type ProvenanceMaterial struct {
+ URI string `json:"uri,omitempty"`
+ Digest DigestSet `json:"digest,omitempty"`
+}
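+
+// exampleMaterial is a minimal sketch (an editor's addition, not upstream
+// code) of a ProvenanceMaterial with a sha256 DigestSet; the URI and the
+// truncated digest value are hypothetical placeholders.
+var exampleMaterial = ProvenanceMaterial{
+	URI: "git+https://example.com/org/repo@refs/heads/main",
+	Digest: DigestSet{
+		"sha256": "3a1f...", // lowercase hex-encoded value (truncated placeholder)
+	},
+}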
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go
new file mode 100644
index 00000000000..5978e9229d9
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go
@@ -0,0 +1,50 @@
+package v01
+
+import (
+ "time"
+
+ "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+ // PredicateSLSAProvenance represents a build provenance for an artifact.
+ PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.1"
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+ Builder common.ProvenanceBuilder `json:"builder"`
+ Recipe ProvenanceRecipe `json:"recipe"`
+ Metadata *ProvenanceMetadata `json:"metadata,omitempty"`
+ Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
+}
+
+// ProvenanceRecipe describes the actions performed by the builder.
+type ProvenanceRecipe struct {
+ Type string `json:"type"`
+ // DefinedInMaterial can be sent as the null pointer to indicate that
+ // the value is not present.
+ DefinedInMaterial *int `json:"definedInMaterial,omitempty"`
+ EntryPoint string `json:"entryPoint"`
+ Arguments interface{} `json:"arguments,omitempty"`
+ Environment interface{} `json:"environment,omitempty"`
+}
+
+// ProvenanceMetadata contains metadata for the built artifact.
+type ProvenanceMetadata struct {
+	// Use pointer to make sure that the absence of a time is not
+	// encoded as the Epoch time.
+ BuildStartedOn *time.Time `json:"buildStartedOn,omitempty"`
+ BuildFinishedOn *time.Time `json:"buildFinishedOn,omitempty"`
+ Completeness ProvenanceComplete `json:"completeness"`
+ Reproducible bool `json:"reproducible"`
+}
+
+// ProvenanceComplete indicates whether the claims in build/recipe are complete.
+// For in-depth information refer to the specification:
+// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
+type ProvenanceComplete struct {
+ Arguments bool `json:"arguments"`
+ Environment bool `json:"environment"`
+ Materials bool `json:"materials"`
+}
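+
+// The sketch below (an editor's addition, not upstream code) shows a minimal
+// v0.1 predicate; the builder ID, recipe type and entry point are
+// hypothetical. DefinedInMaterial points at index 0 to reference the first
+// entry of Materials.
+var definedIn = 0
+
+var examplePredicate = ProvenancePredicate{
+	Builder: common.ProvenanceBuilder{ID: "https://example.com/builders/ci"},
+	Recipe: ProvenanceRecipe{
+		Type:              "https://example.com/recipe-types/make@v1",
+		DefinedInMaterial: &definedIn,
+		EntryPoint:        "build.yaml",
+	},
+}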
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go
new file mode 100644
index 00000000000..40416e29a85
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go
@@ -0,0 +1,144 @@
+package v02
+
+import (
+ "time"
+
+ "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+ // PredicateSLSAProvenance represents a build provenance for an artifact.
+ PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.2"
+)
+
+// These are type aliases to the common package, to avoid backwards-incompatible changes.
+type (
+ DigestSet = common.DigestSet
+ ProvenanceBuilder = common.ProvenanceBuilder
+ ProvenanceMaterial = common.ProvenanceMaterial
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+ // Builder identifies the entity that executed the invocation, which is trusted to have
+ // correctly performed the operation and populated this provenance.
+ //
+ // The identity MUST reflect the trust base that consumers care about. How detailed to be is a
+ // judgement call. For example, GitHub Actions supports both GitHub-hosted runners and
+ // self-hosted runners. The GitHub-hosted runner might be a single identity because it’s all
+ // GitHub from the consumer’s perspective. Meanwhile, each self-hosted runner might have its
+ // own identity because not all runners are trusted by all consumers.
+ Builder common.ProvenanceBuilder `json:"builder"`
+
+ // BuildType is a URI indicating what type of build was performed. It determines the meaning of
+ // [Invocation], [BuildConfig] and [Materials].
+ BuildType string `json:"buildType"`
+
+ // Invocation identifies the event that kicked off the build. When combined with materials,
+ // this SHOULD fully describe the build, such that re-running this invocation results in
+ // bit-for-bit identical output (if the build is reproducible).
+ //
+ // MAY be unset/null if unknown, but this is DISCOURAGED.
+ Invocation ProvenanceInvocation `json:"invocation,omitempty"`
+
+ // BuildConfig lists the steps in the build. If [ProvenanceInvocation.ConfigSource] is not
+ // available, BuildConfig can be used to verify information about the build.
+ //
+ // This is an arbitrary JSON object with a schema defined by [BuildType].
+ BuildConfig interface{} `json:"buildConfig,omitempty"`
+
+ // Metadata contains other properties of the build.
+ Metadata *ProvenanceMetadata `json:"metadata,omitempty"`
+
+ // Materials is the collection of artifacts that influenced the build including sources,
+ // dependencies, build tools, base images, and so on.
+ //
+ // This is considered to be incomplete unless metadata.completeness.materials is true.
+ Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
+}
+
+// ProvenanceInvocation identifies the event that kicked off the build.
+type ProvenanceInvocation struct {
+ // ConfigSource describes where the config file that kicked off the build came from. This is
+ // effectively a pointer to the source where [ProvenancePredicate.BuildConfig] came from.
+ ConfigSource ConfigSource `json:"configSource,omitempty"`
+
+ // Parameters is a collection of all external inputs that influenced the build on top of
+ // ConfigSource. For example, if the invocation type were “make”, then this might be the
+ // flags passed to make aside from the target, which is captured in [ConfigSource.EntryPoint].
+ //
+ // Consumers SHOULD accept only “safe” Parameters. The simplest and safest way to
+ // achieve this is to disallow any parameters altogether.
+ //
+ // This is an arbitrary JSON object with a schema defined by buildType.
+ Parameters interface{} `json:"parameters,omitempty"`
+
+ // Environment contains any other builder-controlled inputs necessary for correctly evaluating
+ // the build. Usually only needed for reproducing the build but not evaluated as part of
+ // policy.
+ //
+ // This SHOULD be minimized to only include things that are part of the public API, that cannot
+ // be recomputed from other values in the provenance, and that actually affect the evaluation
+ // of the build. For example, this might include variables that are referenced in the workflow
+ // definition, but it SHOULD NOT include a dump of all environment variables or include things
+ // like the hostname (assuming hostname is not part of the public API).
+ Environment interface{} `json:"environment,omitempty"`
+}
+
+type ConfigSource struct {
+ // URI indicating the identity of the source of the config.
+ URI string `json:"uri,omitempty"`
+ // Digest is a collection of cryptographic digests for the contents of the artifact specified
+ // by [URI].
+ Digest common.DigestSet `json:"digest,omitempty"`
+ // EntryPoint identifying the entry point into the build. This is often a path to a
+ // configuration file and/or a target label within that file. The syntax and meaning are
+ // defined by buildType. For example, if the buildType were “make”, then this would reference
+ // the directory in which to run make as well as which target to use.
+ //
+ // Consumers SHOULD accept only specific [ProvenanceInvocation.EntryPoint] values. For example,
+ // a policy might only allow the "release" entry point but not the "debug" entry point.
+ // MAY be omitted if the buildType specifies a default value.
+ EntryPoint string `json:"entryPoint,omitempty"`
+}
+
+// ProvenanceMetadata contains metadata for the built artifact.
+type ProvenanceMetadata struct {
+ // BuildInvocationID identifies this particular build invocation, which can be useful for
+ // finding associated logs or other ad-hoc analysis. The exact meaning and format is defined
+ // by [common.ProvenanceBuilder.ID]; by default it is treated as opaque and case-sensitive.
+ // The value SHOULD be globally unique.
+ BuildInvocationID string `json:"buildInvocationID,omitempty"`
+
+ // BuildStartedOn is the timestamp of when the build started.
+ //
+	// Use pointer to make sure that the absence of a time is not
+	// encoded as the Epoch time.
+ BuildStartedOn *time.Time `json:"buildStartedOn,omitempty"`
+ // BuildFinishedOn is the timestamp of when the build completed.
+ BuildFinishedOn *time.Time `json:"buildFinishedOn,omitempty"`
+
+ // Completeness indicates that the builder claims certain fields in this message to be
+ // complete.
+ Completeness ProvenanceComplete `json:"completeness"`
+
+ // Reproducible if true, means the builder claims that running invocation on materials will
+ // produce bit-for-bit identical output.
+ Reproducible bool `json:"reproducible"`
+}
+
+// ProvenanceComplete indicates whether the claims in build/recipe are complete.
+// For in-depth information refer to the specification:
+// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
+type ProvenanceComplete struct {
+ // Parameters if true, means the builder claims that [ProvenanceInvocation.Parameters] is
+ // complete, meaning that all external inputs are properly captured in
+ // ProvenanceInvocation.Parameters.
+ Parameters bool `json:"parameters"`
+ // Environment if true, means the builder claims that [ProvenanceInvocation.Environment] is
+ // complete.
+ Environment bool `json:"environment"`
+ // Materials if true, means the builder claims that materials is complete, usually through some
+ // controls to prevent network access. Sometimes called “hermetic”.
+ Materials bool `json:"materials"`
+}
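+
+// examplePredicate is a minimal sketch (an editor's addition, not upstream
+// code) of a v0.2 predicate; the URIs, digest and entry point are
+// hypothetical placeholders.
+var examplePredicate = ProvenancePredicate{
+	Builder:   ProvenanceBuilder{ID: "https://example.com/builders/ci"},
+	BuildType: "https://example.com/build-types/make@v1",
+	Invocation: ProvenanceInvocation{
+		ConfigSource: ConfigSource{
+			URI:        "git+https://example.com/org/repo@refs/heads/main",
+			Digest:     DigestSet{"sha1": "d0c3..."}, // truncated placeholder
+			EntryPoint: "release",
+		},
+	},
+	Metadata: &ProvenanceMetadata{
+		Completeness: ProvenanceComplete{Parameters: true},
+	},
+}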
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go
new file mode 100644
index 00000000000..e849731dceb
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go
@@ -0,0 +1,151 @@
+package v1
+
+import (
+ "time"
+
+ "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+ // PredicateSLSAProvenance represents a build provenance for an artifact.
+ PredicateSLSAProvenance = "https://slsa.dev/provenance/v1"
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+ // The BuildDefinition describes all of the inputs to the build. The
+ // accuracy and completeness are implied by runDetails.builder.id.
+ //
+ // It SHOULD contain all the information necessary and sufficient to
+ // initialize the build and begin execution.
+ BuildDefinition ProvenanceBuildDefinition `json:"buildDefinition"`
+
+ // Details specific to this particular execution of the build.
+ RunDetails ProvenanceRunDetails `json:"runDetails"`
+}
+
+// ProvenanceBuildDefinition describes the inputs to the build.
+type ProvenanceBuildDefinition struct {
+ // Identifies the template for how to perform the build and interpret the
+ // parameters and dependencies.
+	//
+ // The URI SHOULD resolve to a human-readable specification that includes:
+ // overall description of the build type; schema for externalParameters and
+ // systemParameters; unambiguous instructions for how to initiate the build
+ // given this BuildDefinition, and a complete example.
+ BuildType string `json:"buildType"`
+
+ // The parameters that are under external control, such as those set by a
+ // user or tenant of the build system. They MUST be complete at SLSA Build
+	// L3, meaning that there is no additional mechanism for an external
+ // party to influence the build. (At lower SLSA Build levels, the
+ // completeness MAY be best effort.)
+	//
+ // The build system SHOULD be designed to minimize the size and complexity
+ // of externalParameters, in order to reduce fragility and ease
+ // verification. Consumers SHOULD have an expectation of what “good” looks
+ // like; the more information that they need to check, the harder that task
+ // becomes.
+ ExternalParameters interface{} `json:"externalParameters"`
+
+ // The parameters that are under the control of the entity represented by
+ // builder.id. The primary intention of this field is for debugging,
+ // incident response, and vulnerability management. The values here MAY be
+ // necessary for reproducing the build. There is no need to verify these
+ // parameters because the build system is already trusted, and in many cases
+ // it is not practical to do so.
+ InternalParameters interface{} `json:"internalParameters,omitempty"`
+
+ // Unordered collection of artifacts needed at build time. Completeness is
+ // best effort, at least through SLSA Build L3. For example, if the build
+ // script fetches and executes “example.com/foo.sh”, which in turn fetches
+ // “example.com/bar.tar.gz”, then both “foo.sh” and “bar.tar.gz” SHOULD be
+ // listed here.
+ ResolvedDependencies []ResourceDescriptor `json:"resolvedDependencies,omitempty"`
+}
+
+// ProvenanceRunDetails includes details specific to a particular execution of a
+// build.
+type ProvenanceRunDetails struct {
+ // Identifies the entity that executed the invocation, which is trusted to
+ // have correctly performed the operation and populated this provenance.
+ //
+ // This field is REQUIRED for SLSA Build 1 unless id is implicit from the
+ // attestation envelope.
+ Builder Builder `json:"builder"`
+
+ // Metadata about this particular execution of the build.
+ BuildMetadata BuildMetadata `json:"metadata,omitempty"`
+
+ // Additional artifacts generated during the build that are not considered
+ // the “output” of the build but that might be needed during debugging or
+ // incident response. For example, this might reference logs generated
+ // during the build and/or a digest of the fully evaluated build
+ // configuration.
+ //
+ // In most cases, this SHOULD NOT contain all intermediate files generated
+ // during the build. Instead, this SHOULD only contain files that are
+ // likely to be useful later and that cannot be easily reproduced.
+ Byproducts []ResourceDescriptor `json:"byproducts,omitempty"`
+}
+
+// ResourceDescriptor describes a particular software artifact or resource
+// (mutable or immutable).
+// See https://github.com/in-toto/attestation/blob/main/spec/v1.0/resource_descriptor.md
+type ResourceDescriptor struct {
+ // A URI used to identify the resource or artifact globally. This field is
+ // REQUIRED unless either digest or content is set.
+ URI string `json:"uri,omitempty"`
+
+ // A set of cryptographic digests of the contents of the resource or
+ // artifact. This field is REQUIRED unless either uri or content is set.
+ Digest common.DigestSet `json:"digest,omitempty"`
+
+	// Machine-readable identifier for distinguishing between descriptors.
+ Name string `json:"name,omitempty"`
+
+ // The location of the described resource or artifact, if different from the
+ // uri.
+ DownloadLocation string `json:"downloadLocation,omitempty"`
+
+ // The MIME Type (i.e., media type) of the described resource or artifact.
+ MediaType string `json:"mediaType,omitempty"`
+
+ // The contents of the resource or artifact. This field is REQUIRED unless
+ // either uri or digest is set.
+ Content []byte `json:"content,omitempty"`
+
+ // This field MAY be used to provide additional information or metadata
+ // about the resource or artifact that may be useful to the consumer when
+ // evaluating the attestation against a policy.
+ Annotations map[string]interface{} `json:"annotations,omitempty"`
+}
+
+// Builder represents the transitive closure of all the entities that are, by
+// necessity, trusted to faithfully run the build and record the provenance.
+type Builder struct {
+ // URI indicating the transitive closure of the trusted builder.
+ ID string `json:"id"`
+
+ // Version numbers of components of the builder.
+ Version map[string]string `json:"version,omitempty"`
+
+ // Dependencies used by the orchestrator that are not run within the
+ // workload and that do not affect the build, but might affect the
+ // provenance generation or security guarantees.
+ BuilderDependencies []ResourceDescriptor `json:"builderDependencies,omitempty"`
+}
+
+type BuildMetadata struct {
+ // Identifies this particular build invocation, which can be useful for
+ // finding associated logs or other ad-hoc analysis. The exact meaning and
+ // format is defined by builder.id; by default it is treated as opaque and
+ // case-sensitive. The value SHOULD be globally unique.
+ InvocationID string `json:"invocationID,omitempty"`
+
+ // The timestamp of when the build started.
+ StartedOn *time.Time `json:"startedOn,omitempty"`
+
+ // The timestamp of when the build completed.
+ FinishedOn *time.Time `json:"finishedOn,omitempty"`
+}
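+
+// examplePredicate is a minimal sketch (an editor's addition, not upstream
+// code) of a v1 predicate; the buildType URI, parameters, dependency and
+// builder ID are hypothetical placeholders.
+var examplePredicate = ProvenancePredicate{
+	BuildDefinition: ProvenanceBuildDefinition{
+		BuildType:          "https://example.com/build-types/make@v1",
+		ExternalParameters: map[string]string{"target": "release"},
+		ResolvedDependencies: []ResourceDescriptor{
+			{URI: "git+https://example.com/org/repo@refs/heads/main"},
+		},
+	},
+	RunDetails: ProvenanceRunDetails{
+		Builder: Builder{ID: "https://example.com/builders/ci"},
+	},
+}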
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go
new file mode 100644
index 00000000000..5c36dede13d
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go
@@ -0,0 +1,190 @@
+package in_toto
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var ErrUnknownMetadataType = errors.New("unknown metadata type encountered: not link or layout")
+
+/*
+Set represents a data structure for set operations. See `NewSet` for how to
+create a Set, and available Set receivers for useful set operations.
+
+Under the hood Set aliases map[string]struct{}, where the map keys are the set
+elements and the map values are a memory-efficient way of storing the keys.
+*/
+type Set map[string]struct{}
+
+/*
+NewSet creates a new Set, assigns it the optionally passed variadic string
+elements, and returns it.
+*/
+func NewSet(elems ...string) Set {
+ var s Set = make(map[string]struct{})
+ for _, elem := range elems {
+ s.Add(elem)
+ }
+ return s
+}
+
+/*
+Has returns true if the passed string is a member of the set on which it was
+called and false otherwise.
+*/
+func (s Set) Has(elem string) bool {
+ _, ok := s[elem]
+ return ok
+}
+
+/*
+Add adds the passed string to the set on which it was called, if the string is
+not a member of the set.
+*/
+func (s Set) Add(elem string) {
+ s[elem] = struct{}{}
+}
+
+/*
+Remove removes the passed string from the set on which it was called, if the
+string is a member of the set.
+*/
+func (s Set) Remove(elem string) {
+ delete(s, elem)
+}
+
+/*
+Intersection creates and returns a new Set with the elements of the set on
+which it was called that are also in the passed set.
+*/
+func (s Set) Intersection(s2 Set) Set {
+ res := NewSet()
+ for elem := range s {
+ if !s2.Has(elem) {
+ continue
+ }
+ res.Add(elem)
+ }
+ return res
+}
+
+/*
+Difference creates and returns a new Set with the elements of the set on
+which it was called that are not in the passed set.
+*/
+func (s Set) Difference(s2 Set) Set {
+ res := NewSet()
+ for elem := range s {
+ if s2.Has(elem) {
+ continue
+ }
+ res.Add(elem)
+ }
+ return res
+}
+
+/*
+Filter creates and returns a new Set with the elements of the set on which it
+was called that match the passed pattern. A matching error is treated like a
+non-match, and a warning is printed.
+*/
+func (s Set) Filter(pattern string) Set {
+ res := NewSet()
+ for elem := range s {
+ matched, err := match(pattern, elem)
+ if err != nil {
+ fmt.Printf("WARNING: %s, pattern was '%s'\n", err, pattern)
+ continue
+ }
+ if !matched {
+ continue
+ }
+ res.Add(elem)
+ }
+ return res
+}
+
+/*
+Slice creates and returns an unordered string slice with the elements of the
+set on which it was called.
+*/
+func (s Set) Slice() []string {
+	res := make([]string, 0, len(s))
+ for elem := range s {
+ res = append(res, elem)
+ }
+ return res
+}
+
+/*
+InterfaceKeyStrings returns the string keys of the passed interface{} map as an
+unordered string slice.
+*/
+func InterfaceKeyStrings(m map[string]interface{}) []string {
+ res := make([]string, len(m))
+ i := 0
+ for k := range m {
+ res[i] = k
+ i++
+ }
+ return res
+}
+
+/*
+IsSubSet checks if the parameter subset is a
+subset of the superset s.
+*/
+func (s Set) IsSubSet(subset Set) bool {
+ if len(subset) > len(s) {
+ return false
+ }
+	for key := range subset {
+		if !s.Has(key) {
+			return false
+		}
+	}
+ return true
+}
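+
+// exampleSetOps is a minimal sketch (an editor's addition, not upstream code)
+// of the Set helpers used throughout artifact rule processing; the file names
+// are hypothetical.
+func exampleSetOps() {
+	materials := NewSet("foo.c", "bar.c")
+	products := NewSet("foo.c", "foo.o")
+	_ = products.Difference(materials)   // {"foo.o"}: created artifacts
+	_ = materials.Difference(products)   // {"bar.c"}: deleted artifacts
+	_ = materials.Intersection(products) // {"foo.c"}: artifacts that remained
+	_ = products.Filter("*.o")           // {"foo.o"}: glob-filtered artifacts
+}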
+
+func loadPayload(payloadBytes []byte) (any, error) {
+ var payload map[string]any
+ if err := json.Unmarshal(payloadBytes, &payload); err != nil {
+ return nil, fmt.Errorf("error decoding payload: %w", err)
+ }
+
+ if payload["_type"] == "link" {
+ var link Link
+ if err := checkRequiredJSONFields(payload, reflect.TypeOf(link)); err != nil {
+ return nil, fmt.Errorf("error decoding payload: %w", err)
+ }
+
+ decoder := json.NewDecoder(strings.NewReader(string(payloadBytes)))
+ decoder.DisallowUnknownFields()
+ if err := decoder.Decode(&link); err != nil {
+ return nil, fmt.Errorf("error decoding payload: %w", err)
+ }
+
+ return link, nil
+ } else if payload["_type"] == "layout" {
+ var layout Layout
+ if err := checkRequiredJSONFields(payload, reflect.TypeOf(layout)); err != nil {
+ return nil, fmt.Errorf("error decoding payload: %w", err)
+ }
+
+ decoder := json.NewDecoder(strings.NewReader(string(payloadBytes)))
+ decoder.DisallowUnknownFields()
+ if err := decoder.Decode(&layout); err != nil {
+ return nil, fmt.Errorf("error decoding payload: %w", err)
+ }
+
+ return layout, nil
+ }
+
+ return nil, ErrUnknownMetadataType
+}
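+
+// exampleLinkPayload is a minimal sketch (an editor's addition, not upstream
+// code) of a JSON payload that loadPayload would decode into a Link; the step
+// name is hypothetical, and all required Link fields must be present.
+var exampleLinkPayload = []byte(`{
+	"_type": "link",
+	"name": "package",
+	"materials": {},
+	"products": {},
+	"byproducts": {},
+	"command": [],
+	"environment": {}
+}`)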
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go
new file mode 100644
index 00000000000..f555f79a528
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go
@@ -0,0 +1,14 @@
+//go:build linux || darwin || !windows
+// +build linux darwin !windows
+
+package in_toto
+
+import "golang.org/x/sys/unix"
+
+func isWritable(path string) error {
+	return unix.Access(path, unix.W_OK)
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go
new file mode 100644
index 00000000000..8552f0345d0
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go
@@ -0,0 +1,25 @@
+package in_toto
+
+import (
+ "errors"
+ "os"
+)
+
+func isWritable(path string) error {
+ // get fileInfo
+ info, err := os.Stat(path)
+ if err != nil {
+ return err
+ }
+
+ // check if path is a directory
+ if !info.IsDir() {
+ return errors.New("not a directory")
+ }
+
+	// Check if the owner write bit (0200) is set in the file permissions
+ if info.Mode().Perm()&(1<<(uint(7))) == 0 {
+ return errors.New("not writable")
+ }
+ return nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go
new file mode 100644
index 00000000000..2564bd47eb2
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go
@@ -0,0 +1,1108 @@
+/*
+Package in_toto implements types and routines to verify a software supply chain
+according to the in-toto specification.
+See https://github.com/in-toto/docs/blob/master/in-toto-spec.md
+*/
+package in_toto
+
+import (
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strings"
+ "time"
+)
+
+// ErrInspectionRunDirIsSymlink is returned if the runDir is a symlink
+var ErrInspectionRunDirIsSymlink = errors.New("runDir is a symlink. This is a security risk")
+
+var ErrNotLayout = errors.New("verification workflow passed a non-layout")
+
+/*
+RunInspections iteratively executes the command in the Run field of all
+inspections of the passed layout, creating unsigned link metadata that records
+all files found in the current working directory as materials (before command
+execution) and products (after command execution). A map with inspection names
+as keys and Metablocks containing the generated link metadata as values is
+returned. The format is:
+
+	{
+		<inspection name> : Metablock,
+		<inspection name> : Metablock,
+		...
+	}
+
+If executing the inspection command fails, or if the executed command has a
+non-zero exit code, the first return value is nil and the second return value
+is the error.
+*/
+func RunInspections(layout Layout, runDir string, lineNormalization bool, useDSSE bool) (map[string]Metadata, error) {
+ inspectionMetadata := make(map[string]Metadata)
+
+ for _, inspection := range layout.Inspect {
+
+ paths := []string{"."}
+ if runDir != "" {
+ paths = []string{runDir}
+ }
+
+ linkEnv, err := InTotoRun(inspection.Name, runDir, paths, paths,
+ inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization, false, useDSSE)
+
+ if err != nil {
+ return nil, err
+ }
+
+ retVal := linkEnv.GetPayload().(Link).ByProducts["return-value"]
+ if retVal != float64(0) {
+ return nil, fmt.Errorf("inspection command '%s' of inspection '%s'"+
+ " returned a non-zero value: %d", inspection.Run, inspection.Name,
+ retVal)
+ }
+
+ // Dump inspection link to cwd using the short link name format
+ linkName := fmt.Sprintf(LinkNameFormatShort, inspection.Name)
+ if err := linkEnv.Dump(linkName); err != nil {
+ fmt.Printf("JSON serialization or writing failed: %s", err)
+ }
+
+ inspectionMetadata[inspection.Name] = linkEnv
+ }
+ return inspectionMetadata, nil
+}
+
+// verifyMatchRule is a helper function to process artifact rules of
+// type MATCH. See VerifyArtifacts for more details.
+func verifyMatchRule(ruleData map[string]string,
+ srcArtifacts map[string]interface{}, srcArtifactQueue Set,
+ itemsMetadata map[string]Metadata) Set {
+ consumed := NewSet()
+ // Get destination link metadata
+ dstLinkEnv, exists := itemsMetadata[ruleData["dstName"]]
+ if !exists {
+ // Destination link does not exist, rule can't consume any
+ // artifacts
+ return consumed
+ }
+
+ // Get artifacts from destination link metadata
+ var dstArtifacts map[string]interface{}
+ switch ruleData["dstType"] {
+ case "materials":
+ dstArtifacts = dstLinkEnv.GetPayload().(Link).Materials
+ case "products":
+ dstArtifacts = dstLinkEnv.GetPayload().(Link).Products
+ }
+
+ // cleanup paths in pattern and artifact maps
+ if ruleData["pattern"] != "" {
+ ruleData["pattern"] = path.Clean(ruleData["pattern"])
+ }
+ for k := range srcArtifacts {
+ if path.Clean(k) != k {
+ srcArtifacts[path.Clean(k)] = srcArtifacts[k]
+ delete(srcArtifacts, k)
+ }
+ }
+ for k := range dstArtifacts {
+ if path.Clean(k) != k {
+ dstArtifacts[path.Clean(k)] = dstArtifacts[k]
+ delete(dstArtifacts, k)
+ }
+ }
+
+ // Normalize optional source and destination prefixes, i.e. if
+ // there is a prefix, then add a trailing slash if not there yet
+ for _, prefix := range []string{"srcPrefix", "dstPrefix"} {
+ if ruleData[prefix] != "" {
+ ruleData[prefix] = path.Clean(ruleData[prefix])
+ if !strings.HasSuffix(ruleData[prefix], "/") {
+ ruleData[prefix] += "/"
+ }
+ }
+ }
+ // Iterate over queue and mark consumed artifacts
+ for srcPath := range srcArtifactQueue {
+ // Remove optional source prefix from source artifact path
+ // Noop if prefix is empty, or artifact does not have it
+ srcBasePath := strings.TrimPrefix(srcPath, ruleData["srcPrefix"])
+
+ // Ignore artifacts not matched by rule pattern
+ matched, err := match(ruleData["pattern"], srcBasePath)
+ if err != nil || !matched {
+ continue
+ }
+
+ // Construct corresponding destination artifact path, i.e.
+ // an optional destination prefix plus the source base path
+ dstPath := path.Clean(path.Join(ruleData["dstPrefix"], srcBasePath))
+
+ // Try to find the corresponding destination artifact
+ dstArtifact, exists := dstArtifacts[dstPath]
+ // Ignore artifacts without corresponding destination artifact
+ if !exists {
+ continue
+ }
+
+ // Ignore artifact pairs with no matching hashes
+ if !reflect.DeepEqual(srcArtifacts[srcPath], dstArtifact) {
+ continue
+ }
+
+ // Only if a source and destination artifact pair was found and
+ // their hashes are equal, will we mark the source artifact as
+ // successfully consumed, i.e. it will be removed from the queue
+ consumed.Add(srcPath)
+ }
+ return consumed
+}
+
+/*
+VerifyArtifacts iteratively applies the material and product rules of the
+passed items (step or inspection) to enforce and authorize artifacts (materials
+or products) reported by the corresponding link and to guarantee that
+artifacts are linked together across links. In the beginning all artifacts are
+placed in a queue according to their type. If an artifact gets consumed by a
+rule it is removed from the queue. An artifact can only be consumed once in
+the course of processing the set of rules in ExpectedMaterials or
+ExpectedProducts.
+
+Rules of type MATCH, ALLOW, CREATE, DELETE, MODIFY and DISALLOW are supported.
+
+All rules except for DISALLOW consume queued artifacts on success, and
+leave the queue unchanged on failure. Hence, it is left to a terminal
+DISALLOW rule to fail overall verification, if artifacts are left in the queue
+that should have been consumed by preceding rules.
+*/
+func VerifyArtifacts(items []interface{},
+ itemsMetadata map[string]Metadata) error {
+ // Verify artifact rules for each item in the layout
+ for _, itemI := range items {
+ // The layout item (interface) must be a Link or an Inspection we are only
+ // interested in the name and the expected materials and products
+ var itemName string
+ var expectedMaterials [][]string
+ var expectedProducts [][]string
+
+ switch item := itemI.(type) {
+ case Step:
+ itemName = item.Name
+ expectedMaterials = item.ExpectedMaterials
+ expectedProducts = item.ExpectedProducts
+
+ case Inspection:
+ itemName = item.Name
+ expectedMaterials = item.ExpectedMaterials
+ expectedProducts = item.ExpectedProducts
+
+ default: // Something wrong
+ return fmt.Errorf("VerifyArtifacts received an item of invalid type,"+
+ " elements of passed slice 'items' must be one of 'Step' or"+
+ " 'Inspection', got: '%s'", reflect.TypeOf(item))
+ }
+
+ // Use the item's name to extract the corresponding link
+ srcLinkEnv, exists := itemsMetadata[itemName]
+ if !exists {
+ return fmt.Errorf("VerifyArtifacts could not find metadata"+
+ " for item '%s', got: '%s'", itemName, itemsMetadata)
+ }
+
+ // Create shortcuts to materials and products (including hashes) reported
+ // by the item's link, required to verify "match" rules
+ materials := srcLinkEnv.GetPayload().(Link).Materials
+ products := srcLinkEnv.GetPayload().(Link).Products
+
+ // All other rules only require the material or product paths (without
+ // hashes). We extract them from the corresponding maps and store them as
+ // sets for convenience in further processing
+ materialPaths := NewSet()
+ for _, p := range InterfaceKeyStrings(materials) {
+ materialPaths.Add(path.Clean(p))
+ }
+ productPaths := NewSet()
+ for _, p := range InterfaceKeyStrings(products) {
+ productPaths.Add(path.Clean(p))
+ }
+
+ // For `create`, `delete` and `modify` rules we prepare sets of artifacts
+ // (without hashes) that were created, deleted or modified in the current
+ // step or inspection
+ created := productPaths.Difference(materialPaths)
+ deleted := materialPaths.Difference(productPaths)
+ remained := materialPaths.Intersection(productPaths)
+ modified := NewSet()
+ for name := range remained {
+ if !reflect.DeepEqual(materials[name], products[name]) {
+ modified.Add(name)
+ }
+ }
+
+ // For each item we have to run rule verification, once per artifact type.
+ // Here we prepare the corresponding data for each round.
+ verificationDataList := []map[string]interface{}{
+ {
+ "srcType": "materials",
+ "rules": expectedMaterials,
+ "artifacts": materials,
+ "artifactPaths": materialPaths,
+ },
+ {
+ "srcType": "products",
+ "rules": expectedProducts,
+ "artifacts": products,
+ "artifactPaths": productPaths,
+ },
+ }
+ // TODO: Add logging library (see in-toto/in-toto-golang#4)
+ // fmt.Printf("Verifying %s '%s' ", reflect.TypeOf(itemI), itemName)
+
+ // Process all material rules using the corresponding materials and all
+ // product rules using the corresponding products
+ for _, verificationData := range verificationDataList {
+ // TODO: Add logging library (see in-toto/in-toto-golang#4)
+ // fmt.Printf("%s...\n", verificationData["srcType"])
+
+ rules := verificationData["rules"].([][]string)
+ artifacts := verificationData["artifacts"].(map[string]interface{})
+
+ // Use artifacts (without hashes) as base queue. Each rule only operates
+ // on artifacts in that queue. If a rule consumes an artifact (i.e. can
+ // be applied successfully), the artifact is removed from the queue. By
+ // applying a DISALLOW rule eventually, verification may return an error,
+ // if the rule matches any artifacts in the queue that should have been
+ // consumed earlier.
+ queue := verificationData["artifactPaths"].(Set)
+
+ // TODO: Add logging library (see in-toto/in-toto-golang#4)
+ // fmt.Printf("Initial state\nMaterials: %s\nProducts: %s\nQueue: %s\n\n",
+ // materialPaths.Slice(), productPaths.Slice(), queue.Slice())
+
+ // Verify rules sequentially
+ for _, rule := range rules {
+ // Parse rule and error out if it is malformed
+ // NOTE: the rule format should have been validated before
+ ruleData, err := UnpackRule(rule)
+ if err != nil {
+ return err
+ }
+
+ // Apply rule pattern to filter queued artifacts that are up for rule
+ // specific consumption
+ filtered := queue.Filter(path.Clean(ruleData["pattern"]))
+
+ var consumed Set
+ switch ruleData["type"] {
+ case "match":
+ // Note: here we need to perform more elaborate filtering
+ consumed = verifyMatchRule(ruleData, artifacts, queue, itemsMetadata)
+
+ case "allow":
+ // Consumes all filtered artifacts
+ consumed = filtered
+
+ case "create":
+ // Consumes filtered artifacts that were created
+ consumed = filtered.Intersection(created)
+
+ case "delete":
+ // Consumes filtered artifacts that were deleted
+ consumed = filtered.Intersection(deleted)
+
+ case "modify":
+ // Consumes filtered artifacts that were modified
+ consumed = filtered.Intersection(modified)
+
+ case "disallow":
+ // Does not consume but errors out if artifacts were filtered
+ if len(filtered) > 0 {
+ return fmt.Errorf("artifact verification failed for %s '%s',"+
+ " %s %s disallowed by rule %s",
+ reflect.TypeOf(itemI).Name(), itemName,
+ verificationData["srcType"], filtered.Slice(), rule)
+ }
+ case "require":
+					// REQUIRE is somewhat of a weird animal that does not use
+					// patterns but rather single filenames (for now).
+ if !queue.Has(ruleData["pattern"]) {
+ return fmt.Errorf("artifact verification failed for %s in REQUIRE '%s',"+
+ " because %s is not in %s", verificationData["srcType"],
+ ruleData["pattern"], ruleData["pattern"], queue.Slice())
+ }
+ }
+ // Update queue by removing consumed artifacts
+ queue = queue.Difference(consumed)
+ // TODO: Add logging library (see in-toto/in-toto-golang#4)
+ // fmt.Printf("Rule: %s\nQueue: %s\n\n", rule, queue.Slice())
+ }
+ }
+ }
+ return nil
+}
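+
+// exampleArtifactRules is a minimal sketch (an editor's addition, not
+// upstream code) of an ExpectedMaterials rule list for a hypothetical
+// "package" step: consume everything that matches a product of the "build"
+// step, then disallow anything left in the queue.
+var exampleArtifactRules = [][]string{
+	{"MATCH", "*", "WITH", "PRODUCTS", "FROM", "build"},
+	{"DISALLOW", "*"},
+}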
+
+/*
+ReduceStepsMetadata merges for each step of the passed Layout all the passed
+per-functionary links into a single link, asserting that the reported Materials
+and Products are equal across links for a given step. This function may be
+used at a point during the overall verification where link thresholds have
+been verified and subsequent verification only needs one exemplary link per
+step. The function returns a map with one Metablock (link) per step:
+
+	{
+		<step name> : Metablock,
+		<step name> : Metablock,
+		...
+	}
+
+If links corresponding to the same step report different Materials or different
+Products, the first return value is nil and the second return value is the
+error.
+*/
+func ReduceStepsMetadata(layout Layout,
+ stepsMetadata map[string]map[string]Metadata) (map[string]Metadata,
+ error) {
+ stepsMetadataReduced := make(map[string]Metadata)
+
+ for _, step := range layout.Steps {
+ linksPerStep, ok := stepsMetadata[step.Name]
+ // We should never get here, layout verification must fail earlier
+ if !ok || len(linksPerStep) < 1 {
+ panic("Could not reduce metadata for step '" + step.Name +
+ "', no link metadata found.")
+ }
+
+ // Get the first link (could be any link) for the current step, which will
+ // serve as reference link for below comparisons
+ var referenceKeyID string
+ var referenceLinkEnv Metadata
+ for keyID, linkEnv := range linksPerStep {
+ referenceLinkEnv = linkEnv
+ referenceKeyID = keyID
+ break
+ }
+
+ // Only one link, nothing to reduce, take the reference link
+ if len(linksPerStep) == 1 {
+ stepsMetadataReduced[step.Name] = referenceLinkEnv
+
+ // Multiple links, reduce but first check
+ } else {
+ // Artifact maps must be equal for each type among all links
+			// TODO: What should we do if there are more links than the
+			// threshold requires, but not all of them are equal? Right now we
+			// would also error.
+ for keyID, linkEnv := range linksPerStep {
+ if !reflect.DeepEqual(linkEnv.GetPayload().(Link).Materials,
+ referenceLinkEnv.GetPayload().(Link).Materials) ||
+ !reflect.DeepEqual(linkEnv.GetPayload().(Link).Products,
+ referenceLinkEnv.GetPayload().(Link).Products) {
+ return nil, fmt.Errorf("link '%s' and '%s' have different"+
+ " artifacts",
+ fmt.Sprintf(LinkNameFormat, step.Name, referenceKeyID),
+ fmt.Sprintf(LinkNameFormat, step.Name, keyID))
+ }
+ }
+			// We haven't errored out, so we can reduce (i.e. take the reference link)
+ stepsMetadataReduced[step.Name] = referenceLinkEnv
+ }
+ }
+ return stepsMetadataReduced, nil
+}
+
+/*
+VerifyStepCommandAlignment (soft) verifies that for each step of the passed
+layout the command executed, as per the passed link, matches the expected
+command, as per the layout. Soft verification means that, in case a command
+does not align, a warning is issued.
+*/
+func VerifyStepCommandAlignment(layout Layout,
+ stepsMetadata map[string]map[string]Metadata) {
+ for _, step := range layout.Steps {
+ linksPerStep, ok := stepsMetadata[step.Name]
+ // We should never get here, layout verification must fail earlier
+ if !ok || len(linksPerStep) < 1 {
+ panic("Could not verify command alignment for step '" + step.Name +
+ "', no link metadata found.")
+ }
+
+ for signerKeyID, linkEnv := range linksPerStep {
+ expectedCommandS := strings.Join(step.ExpectedCommand, " ")
+ executedCommandS := strings.Join(linkEnv.GetPayload().(Link).Command, " ")
+
+ if expectedCommandS != executedCommandS {
+ linkName := fmt.Sprintf(LinkNameFormat, step.Name, signerKeyID)
+ fmt.Printf("WARNING: Expected command for step '%s' (%s) and command"+
+ " reported by '%s' (%s) differ.\n",
+ step.Name, expectedCommandS, linkName, executedCommandS)
+ }
+ }
+ }
+}
+
+/*
+LoadLayoutCertificates loads the root and intermediate CAs from the layout, if
+present. These are used to check signatures that were created by keys not
+configured in the PubKeys section of a step. If no CAs are configured, such
+signatures are not allowed and the returned CertPools will be empty.
+*/
+func LoadLayoutCertificates(layout Layout, intermediatePems [][]byte) (*x509.CertPool, *x509.CertPool, error) {
+ rootPool := x509.NewCertPool()
+ for _, certPem := range layout.RootCas {
+ ok := rootPool.AppendCertsFromPEM([]byte(certPem.KeyVal.Certificate))
+ if !ok {
+ return nil, nil, fmt.Errorf("failed to load root certificates for layout")
+ }
+ }
+
+ intermediatePool := x509.NewCertPool()
+ for _, intermediatePem := range layout.IntermediateCas {
+ ok := intermediatePool.AppendCertsFromPEM([]byte(intermediatePem.KeyVal.Certificate))
+ if !ok {
+ return nil, nil, fmt.Errorf("failed to load intermediate certificates for layout")
+ }
+ }
+
+ for _, intermediatePem := range intermediatePems {
+ ok := intermediatePool.AppendCertsFromPEM(intermediatePem)
+ if !ok {
+ return nil, nil, fmt.Errorf("failed to load provided intermediate certificates")
+ }
+ }
+
+ return rootPool, intermediatePool, nil
+}
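+
+// A minimal caller-side sketch (illustrative; the file name is hypothetical):
+// extra intermediate certificates are passed as raw PEM bytes, so they can be
+// read straight from disk.
+//
+//	pem, err := os.ReadFile("intermediate.pem")
+//	if err != nil { /* handle error */ }
+//	roots, intermediates, err := LoadLayoutCertificates(layout, [][]byte{pem})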
+
+/*
+VerifyLinkSignatureThesholds verifies that for each step of the passed layout,
+there are at least Threshold links, validly signed by different authorized
+functionaries. The returned map of link metadata per step contains only
+links with valid signatures from distinct functionaries and has the format:
+
+	{
+		<step name> : {
+			<key id> : Metablock,
+			<key id> : Metablock,
+			...
+		},
+		<step name> : {
+			<key id> : Metablock,
+			<key id> : Metablock,
+			...
+		}
+		...
+	}
+
+If for any step of the layout there are not enough links available, the first
+return value is an empty map of Metablock maps and the second return value is
+the error.
+*/
+func VerifyLinkSignatureThesholds(layout Layout,
+ stepsMetadata map[string]map[string]Metadata, rootCertPool, intermediateCertPool *x509.CertPool) (
+ map[string]map[string]Metadata, error) {
+ // This will store links with a valid signature from an authorized
+ // functionary, for all steps
+ stepsMetadataVerified := make(map[string]map[string]Metadata)
+
+ // Try to find enough (>= threshold) links each with a valid signature from
+ // distinct authorized functionaries for each step
+ for _, step := range layout.Steps {
+ var stepErr error
+
+ // This will store links with a valid signature from an authorized
+ // functionary for the given step
+ linksPerStepVerified := make(map[string]Metadata)
+
+ // Check if there are any links at all for the given step
+ linksPerStep, ok := stepsMetadata[step.Name]
+ if !ok || len(linksPerStep) < 1 {
+ stepErr = fmt.Errorf("no links found")
+ }
+
+ // For each link corresponding to a step, check that the signer key was
+ // authorized, the layout contains a verification key and the signature
+ // verification passes. Only good links are stored, to verify thresholds
+ // below.
+ isAuthorizedSignature := false
+ for signerKeyID, linkEnv := range linksPerStep {
+ for _, authorizedKeyID := range step.PubKeys {
+ if signerKeyID == authorizedKeyID {
+ if verifierKey, ok := layout.Keys[authorizedKeyID]; ok {
+ if err := linkEnv.VerifySignature(verifierKey); err == nil {
+ linksPerStepVerified[signerKeyID] = linkEnv
+ isAuthorizedSignature = true
+ break
+ }
+ }
+ }
+ }
+
+ // If the signer's key wasn't in our step's pubkeys array, check the cert pool to
+ // see if the key is known to us.
+ if !isAuthorizedSignature {
+ sig, err := linkEnv.GetSignatureForKeyID(signerKeyID)
+ if err != nil {
+ stepErr = err
+ continue
+ }
+
+ cert, err := sig.GetCertificate()
+ if err != nil {
+ stepErr = err
+ continue
+ }
+
+ // test certificate against the step's constraints to make sure it's a valid functionary
+ err = step.CheckCertConstraints(cert, layout.RootCAIDs(), rootCertPool, intermediateCertPool)
+ if err != nil {
+ stepErr = err
+ continue
+ }
+
+ err = linkEnv.VerifySignature(cert)
+ if err != nil {
+ stepErr = err
+ continue
+ }
+
+ linksPerStepVerified[signerKeyID] = linkEnv
+ }
+ }
+
+ // Store all good links for a step
+ stepsMetadataVerified[step.Name] = linksPerStepVerified
+
+ if len(linksPerStepVerified) < step.Threshold {
+ linksPerStep := stepsMetadata[step.Name]
+ return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s)."+
+ " '%d' out of '%d' available link(s) have a valid signature from an"+
+ " authorized signer: %v", step.Name, step.Threshold,
+ len(linksPerStepVerified), len(linksPerStep), stepErr)
+ }
+ }
+ return stepsMetadataVerified, nil
+}
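+
+// Threshold sketch (illustrative; key ids and variable names are made up):
+// a step with Threshold: 2 only passes if links signed by at least two
+// distinct authorized key ids verify, e.g.
+//
+//	stepsMetadata["build"] = map[string]Metadata{
+//		"aaaa...": linkSignedByKeyA,
+//		"bbbb...": linkSignedByKeyB, // both signatures must verify
+//	}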
+
+/*
+LoadLinksForLayout loads, for every step of the passed layout, a Metablock
+containing the corresponding link. A base path to a directory that contains
+the links may be passed using linkDir. Link file names are constructed
+using LinkNameFormat together with the corresponding step name and authorized
+functionary key ids. A map of link metadata is returned and has the following
+format:
+
+	{
+		<step name> : {
+			<key id> : Metablock,
+			<key id> : Metablock,
+			...
+		},
+		<step name> : {
+			<key id> : Metablock,
+			<key id> : Metablock,
+			...
+		}
+		...
+	}
+
+If a link cannot be loaded at a constructed link name or is invalid, it is
+ignored. Only a preliminary threshold check is performed: if there aren't at
+least Threshold links for any given step, the first return value is an empty
+map of Metablock maps and the second return value is the error.
+*/
+func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metadata, error) {
+ stepsMetadata := make(map[string]map[string]Metadata)
+
+ for _, step := range layout.Steps {
+ linksPerStep := make(map[string]Metadata)
+ // Since we can verify against certificates belonging to a CA, we need to
+ // load any possible links
+ linkFiles, err := filepath.Glob(path.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name)))
+ if err != nil {
+ return nil, err
+ }
+
+ for _, linkPath := range linkFiles {
+ linkEnv, err := LoadMetadata(linkPath)
+ if err != nil {
+ continue
+ }
+
+ // To get the full key from the metadata's signatures, we have to check
+ // for one with the same short id...
+ signerShortKeyID := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(linkPath), step.Name+"."), ".link")
+ for _, sig := range linkEnv.Sigs() {
+ if strings.HasPrefix(sig.KeyID, signerShortKeyID) {
+ linksPerStep[sig.KeyID] = linkEnv
+ break
+ }
+ }
+ }
+
+ if len(linksPerStep) < step.Threshold {
+ return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s),"+
+ " found '%d'", step.Name, step.Threshold, len(linksPerStep))
+ }
+
+ stepsMetadata[step.Name] = linksPerStep
+ }
+
+ return stepsMetadata, nil
+}
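+
+// Naming sketch (illustrative key id): as the trimming above implies, the
+// link for step "build" signed by a key whose id starts with "2f89b927" is
+// expected in linkDir under
+//
+//	build.2f89b927.link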
+
+/*
+VerifyLayoutExpiration verifies that the passed Layout has not expired. It
+returns an error if the (zulu) date in the Expires field is in the past.
+*/
+func VerifyLayoutExpiration(layout Layout) error {
+ expires, err := time.Parse(ISO8601DateSchema, layout.Expires)
+ if err != nil {
+ return err
+ }
+ // Uses timezone of expires, i.e. UTC
+ if time.Until(expires) < 0 {
+ return fmt.Errorf("layout has expired on '%s'", expires)
+ }
+ return nil
+}
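+
+// Sketch (illustrative date): an expiry in the future verifies, one in the
+// past errors.
+//
+//	layout.Expires = "2030-01-01T00:00:00Z"
+//	err := VerifyLayoutExpiration(layout) // nil until that instant passes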
+
+/*
+VerifyLayoutSignatures verifies, for each key in the passed key map, the
+corresponding signature of the layout in the passed Metadata. Signatures and
+keys are associated by key id. If the key map is empty, if the Metadata lacks
+a signature for one or more of the passed keys, or if a matching signature is
+invalid, an error is returned.
+*/
+func VerifyLayoutSignatures(layoutEnv Metadata,
+ layoutKeys map[string]Key) error {
+ if len(layoutKeys) < 1 {
+ return fmt.Errorf("layout verification requires at least one key")
+ }
+
+ for _, key := range layoutKeys {
+ if err := layoutEnv.VerifySignature(key); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+/*
+GetSummaryLink merges the materials of the first step (as mentioned in the
+layout) and the products of the last step and returns a new link. This link
+reports the materials and products and summarizes the overall software supply
+chain.
+NOTE: The assumption is that the steps mentioned in the layout are to be
+performed sequentially. So, the first step mentioned in the layout denotes what
+comes into the supply chain and the last step denotes what goes out.
+*/
+func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metadata,
+ stepName string, useDSSE bool) (Metadata, error) {
+ var summaryLink Link
+ if len(layout.Steps) > 0 {
+ firstStepLink := stepsMetadataReduced[layout.Steps[0].Name]
+ lastStepLink := stepsMetadataReduced[layout.Steps[len(layout.Steps)-1].Name]
+
+ summaryLink.Materials = firstStepLink.GetPayload().(Link).Materials
+ summaryLink.Name = stepName
+ summaryLink.Type = firstStepLink.GetPayload().(Link).Type
+
+ summaryLink.Products = lastStepLink.GetPayload().(Link).Products
+ summaryLink.ByProducts = lastStepLink.GetPayload().(Link).ByProducts
+ // Using the last command of the sublayout as the command
+ // of the summary link can be misleading. Is it necessary to
+ // include all the commands executed as part of the sublayout?
+ summaryLink.Command = lastStepLink.GetPayload().(Link).Command
+ }
+
+ if useDSSE {
+ env := &Envelope{}
+ if err := env.SetPayload(summaryLink); err != nil {
+ return nil, err
+ }
+
+ return env, nil
+ }
+
+ return &Metablock{Signed: summaryLink}, nil
+}
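+
+// Shape of the result (illustrative; fetchLink and buildLink are made-up
+// names): for a layout with steps [fetch, build],
+//
+//	summary.Materials  == fetchLink.Materials  // what came into the chain
+//	summary.Products   == buildLink.Products   // what goes out
+//	summary.ByProducts == buildLink.ByProducts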
+
+/*
+VerifySublayouts checks if any step in the supply chain is a sublayout, and if
+so, recursively resolves it and replaces it with a summary link summarizing the
+steps carried out in the sublayout.
+*/
+func VerifySublayouts(layout Layout,
+ stepsMetadataVerified map[string]map[string]Metadata,
+ superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metadata, error) {
+ for stepName, linkData := range stepsMetadataVerified {
+ for keyID, metadata := range linkData {
+ if _, ok := metadata.GetPayload().(Layout); ok {
+ layoutKeys := make(map[string]Key)
+ layoutKeys[keyID] = layout.Keys[keyID]
+
+ sublayoutLinkDir := fmt.Sprintf(SublayoutLinkDirFormat,
+ stepName, keyID)
+ sublayoutLinkPath := filepath.Join(superLayoutLinkPath,
+ sublayoutLinkDir)
+ summaryLink, err := InTotoVerify(metadata, layoutKeys,
+ sublayoutLinkPath, stepName, make(map[string]string), intermediatePems, lineNormalization)
+ if err != nil {
+ return nil, err
+ }
+ linkData[keyID] = summaryLink
+ }
+
+ }
+ }
+ return stepsMetadataVerified, nil
+}
+
+// TODO: find a better way than two helper functions for the replacer op
+
+func substituteParamatersInSlice(replacer *strings.Replacer, slice []string) []string {
+ newSlice := make([]string, 0)
+ for _, item := range slice {
+ newSlice = append(newSlice, replacer.Replace(item))
+ }
+ return newSlice
+}
+
+func substituteParametersInSliceOfSlices(replacer *strings.Replacer,
+ slice [][]string) [][]string {
+ newSlice := make([][]string, 0)
+ for _, item := range slice {
+ newSlice = append(newSlice, substituteParamatersInSlice(replacer,
+ item))
+ }
+ return newSlice
+}
+
+/*
+SubstituteParameters performs parameter substitution in steps and inspections
+in the following fields:
+- Expected Materials and Expected Products of both
+- Run of inspections
+- Expected Command of steps
+The substitution marker is '{}' and the keyword within the braces is replaced
+by a value from the passed substitution map, parameterDictionary. The
+layout with parameters substituted is returned to the calling function.
+*/
+func SubstituteParameters(layout Layout,
+ parameterDictionary map[string]string) (Layout, error) {
+
+ if len(parameterDictionary) == 0 {
+ return layout, nil
+ }
+
+ parameters := make([]string, 0)
+
+ re := regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
+ for parameter, value := range parameterDictionary {
+ parameterFormatCheck := re.MatchString(parameter)
+ if !parameterFormatCheck {
+ return layout, fmt.Errorf("invalid format for parameter")
+ }
+
+ parameters = append(parameters, "{"+parameter+"}")
+ parameters = append(parameters, value)
+ }
+
+ replacer := strings.NewReplacer(parameters...)
+
+ for i := range layout.Steps {
+ layout.Steps[i].ExpectedMaterials = substituteParametersInSliceOfSlices(
+ replacer, layout.Steps[i].ExpectedMaterials)
+ layout.Steps[i].ExpectedProducts = substituteParametersInSliceOfSlices(
+ replacer, layout.Steps[i].ExpectedProducts)
+ layout.Steps[i].ExpectedCommand = substituteParamatersInSlice(replacer,
+ layout.Steps[i].ExpectedCommand)
+ }
+
+ for i := range layout.Inspect {
+ layout.Inspect[i].ExpectedMaterials =
+ substituteParametersInSliceOfSlices(replacer,
+ layout.Inspect[i].ExpectedMaterials)
+ layout.Inspect[i].ExpectedProducts =
+ substituteParametersInSliceOfSlices(replacer,
+ layout.Inspect[i].ExpectedProducts)
+ layout.Inspect[i].Run = substituteParamatersInSlice(replacer,
+ layout.Inspect[i].Run)
+ }
+
+ return layout, nil
+}
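+
+// Substitution sketch (illustrative values): with
+//
+//	parameterDictionary = map[string]string{"ref": "v1.2.3"}
+//
+// a step's ExpectedCommand ["git", "checkout", "{ref}"] becomes
+// ["git", "checkout", "v1.2.3"].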
+
+/*
+InTotoVerify can be used to verify an entire software supply chain according to
+the in-toto specification. It requires the metadata of the root layout, a map
+that contains public keys to verify the root layout signatures, a path to a
+directory from where it can load link metadata files, which are treated as
+signed evidence for the steps defined in the layout, a step name, and a
+parameter dictionary used for parameter substitution. The step name only
+matters for sublayouts, where it's important to associate the summary of that
+step with a unique name. The verification routine is as follows:
+
+1. Verify layout signature(s) using passed key(s)
+2. Verify layout expiration date
+3. Substitute parameters in layout
+4. Load link metadata files for steps of layout
+5. Verify signatures and signature thresholds for steps of layout
+6. Verify sublayouts recursively
+7. Verify command alignment for steps of layout (only warns)
+8. Verify artifact rules for steps of layout
+9. Execute inspection commands (generates link metadata for each inspection)
+10. Verify artifact rules for inspections of layout
+
+InTotoVerify returns a summary link wrapped in a Metadata object and an error
+value. If any of the verification routines fail, verification is aborted and
+the error is returned. In that case, the first return value is nil.
+
+NOTE: Artifact rules of type "create", "modify"
+and "delete" are currently not supported.
+*/
+func InTotoVerify(layoutEnv Metadata, layoutKeys map[string]Key,
+ linkDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) (
+ Metadata, error) {
+
+ // Verify root signatures
+ if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil {
+ return nil, err
+ }
+
+ useDSSE := false
+ if _, ok := layoutEnv.(*Envelope); ok {
+ useDSSE = true
+ }
+
+ // Extract the layout from its Metadata container (for further processing)
+ layout, ok := layoutEnv.GetPayload().(Layout)
+ if !ok {
+ return nil, ErrNotLayout
+ }
+
+ // Verify layout expiration
+ if err := VerifyLayoutExpiration(layout); err != nil {
+ return nil, err
+ }
+
+ // Substitute parameters in layout
+ layout, err := SubstituteParameters(layout, parameterDictionary)
+ if err != nil {
+ return nil, err
+ }
+
+ rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems)
+ if err != nil {
+ return nil, err
+ }
+
+ // Load links for layout
+ stepsMetadata, err := LoadLinksForLayout(layout, linkDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify link signatures
+ stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout,
+ stepsMetadata, rootCertPool, intermediateCertPool)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify and resolve sublayouts
+ stepsSublayoutVerified, err := VerifySublayouts(layout,
+ stepsMetadataVerified, linkDir, intermediatePems, lineNormalization)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify command alignment (WARNING only)
+ VerifyStepCommandAlignment(layout, stepsSublayoutVerified)
+
+ // Given that signature thresholds have been checked above and the rest of
+ // the relevant link properties, i.e. materials and products, have to be
+ // exactly equal, we can reduce the map of steps metadata. However, we error
+ // if the relevant properties are not equal among links of a step.
+ stepsMetadataReduced, err := ReduceStepsMetadata(layout,
+ stepsSublayoutVerified)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify artifact rules
+ if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(),
+ stepsMetadataReduced); err != nil {
+ return nil, err
+ }
+
+ inspectionMetadata, err := RunInspections(layout, "", lineNormalization, useDSSE)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add steps metadata to inspection metadata, because inspection artifact
+ // rules may also refer to artifacts reported by step links
+ for k, v := range stepsMetadataReduced {
+ inspectionMetadata[k] = v
+ }
+
+ if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(),
+ inspectionMetadata); err != nil {
+ return nil, err
+ }
+
+ summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE)
+ if err != nil {
+ return nil, err
+ }
+
+ return summaryLink, nil
+}
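+
+// End-to-end sketch (illustrative; the file name is hypothetical and ownerKey
+// is assumed to be a Key loaded by the caller):
+//
+//	layoutEnv, err := LoadMetadata("root.layout")
+//	if err != nil { /* handle error */ }
+//	layoutKeys := map[string]Key{ownerKey.KeyID: ownerKey}
+//	summary, err := InTotoVerify(layoutEnv, layoutKeys, "links/", "",
+//		map[string]string{}, nil, false)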
+
+/*
+InTotoVerifyWithDirectory provides the same functionality as InTotoVerify, but
+additionally allows selecting a local directory in which the inspections are run.
+*/
+func InTotoVerifyWithDirectory(layoutEnv Metadata, layoutKeys map[string]Key,
+ linkDir string, runDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) (
+ Metadata, error) {
+
+ // runDir sanity checks
+ // check if path exists
+ info, err := os.Stat(runDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if runDir is a symlink
+ if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+ return nil, ErrInspectionRunDirIsSymlink
+ }
+
+ // check if runDir is writable and a directory
+ err = isWritable(runDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if runDir is empty (we do not want to overwrite files)
+ // We abuse File.Readdirnames for this action.
+ f, err := os.Open(runDir)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ // We use Readdirnames(1) for performance reasons; one child node
+ // is enough to prove that the directory is not empty
+ _, err = f.Readdirnames(1)
+ // Readdirnames returns io.EOF if the directory is empty
+ if err == io.EOF {
+ return nil, err
+ }
+ err = f.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify root signatures
+ if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil {
+ return nil, err
+ }
+
+ useDSSE := false
+ if _, ok := layoutEnv.(*Envelope); ok {
+ useDSSE = true
+ }
+
+ // Extract the layout from its Metadata container (for further processing)
+ layout, ok := layoutEnv.GetPayload().(Layout)
+ if !ok {
+ return nil, ErrNotLayout
+ }
+
+ // Verify layout expiration
+ if err := VerifyLayoutExpiration(layout); err != nil {
+ return nil, err
+ }
+
+ // Substitute parameters in layout
+ layout, err = SubstituteParameters(layout, parameterDictionary)
+ if err != nil {
+ return nil, err
+ }
+
+ rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems)
+ if err != nil {
+ return nil, err
+ }
+
+ // Load links for layout
+ stepsMetadata, err := LoadLinksForLayout(layout, linkDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify link signatures
+ stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout,
+ stepsMetadata, rootCertPool, intermediateCertPool)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify and resolve sublayouts
+ stepsSublayoutVerified, err := VerifySublayouts(layout,
+ stepsMetadataVerified, linkDir, intermediatePems, lineNormalization)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify command alignment (WARNING only)
+ VerifyStepCommandAlignment(layout, stepsSublayoutVerified)
+
+ // Given that signature thresholds have been checked above and the rest of
+ // the relevant link properties, i.e. materials and products, have to be
+ // exactly equal, we can reduce the map of steps metadata. However, we error
+ // if the relevant properties are not equal among links of a step.
+ stepsMetadataReduced, err := ReduceStepsMetadata(layout,
+ stepsSublayoutVerified)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify artifact rules
+ if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(),
+ stepsMetadataReduced); err != nil {
+ return nil, err
+ }
+
+ inspectionMetadata, err := RunInspections(layout, runDir, lineNormalization, useDSSE)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add steps metadata to inspection metadata, because inspection artifact
+ // rules may also refer to artifacts reported by step links
+ for k, v := range stepsMetadataReduced {
+ inspectionMetadata[k] = v
+ }
+
+ if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(),
+ inspectionMetadata); err != nil {
+ return nil, err
+ }
+
+ summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE)
+ if err != nil {
+ return nil, err
+ }
+
+ return summaryLink, nil
+}
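+
+// Sketch (illustrative paths): same call shape as InTotoVerify, plus a
+// dedicated directory in which the inspection commands are executed:
+//
+//	summary, err := InTotoVerifyWithDirectory(layoutEnv, layoutKeys,
+//		"links/", "/path/to/rundir", "", map[string]string{}, nil, false)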
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 244ee19c4bf..af2ef639536 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -27,6 +27,16 @@ Use the links above for more information on each.
# changelog
+* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1)
+ * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079
+ * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059
+ * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080
+ * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086
+ * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090
+ * flate: Faster load+store https://github.com/klauspost/compress/pull/1104
+ * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101
+ * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103
+
* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
* Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
* fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
@@ -36,6 +46,9 @@ Use the links above for more information on each.
* flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
* flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
+
+ See changes to v1.17.x
+
* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
* zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
* s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
@@ -102,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
-
+
+
See changes to v1.16.x
@@ -669,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
# license
This code is licensed under the same conditions as the original Go code. See LICENSE file.
+
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 4e92f5998a8..57d17eeab9e 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -421,7 +421,9 @@ func (d *compressor) deflateLazy() {
d.h = newHuffmanEncoder(maxFlateBlockTokens)
}
var tmp [256]uint16
- for _, v := range d.window[s.index:d.windowEnd] {
+ toIndex := d.window[s.index:d.windowEnd]
+ toIndex = toIndex[:min(len(toIndex), maxFlateBlockTokens)]
+ for _, v := range toIndex {
tmp[v]++
}
d.h.generate(tmp[:], 15)
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 03a1796979b..7151140ccd7 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -646,7 +646,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
w.lastHeader = 0
}
- numLiterals, numOffsets := w.indexTokens(tokens, fillReuse && !sync)
+ numLiterals, numOffsets := w.indexTokens(tokens, true)
extraBits := 0
ssize, storable := w.storedSize(input)
@@ -781,7 +781,7 @@ func (w *huffmanBitWriter) fillTokens() {
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
-func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+func (w *huffmanBitWriter) indexTokens(t *tokens, alwaysEOB bool) (numLiterals, numOffsets int) {
//copy(w.literalFreq[:], t.litHist[:])
*(*[256]uint16)(w.literalFreq[:]) = t.litHist
//copy(w.literalFreq[256:], t.extraHist[:])
@@ -791,9 +791,10 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num
if t.n == 0 {
return
}
- if filled {
- return maxNumLit, maxNumDist
+ if alwaysEOB {
+ w.literalFreq[endBlockMarker] = 1
}
+
// get the number of literals
numLiterals = len(w.literalFreq)
for w.literalFreq[numLiterals-1] == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
index 90b74f7acdd..455ed3e2b56 100644
--- a/vendor/github.com/klauspost/compress/flate/stateless.go
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -61,13 +61,19 @@ var bitWriterPool = sync.Pool{
},
}
+// tokensPool contains tokens struct objects that can be reused
+var tokensPool = sync.Pool{
+ New: func() any {
+ return &tokens{}
+ },
+}
+
// StatelessDeflate allows compressing directly to a Writer without retaining state.
// When returning everything will be flushed.
// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
// Longer dictionaries will be truncated and will still produce valid output.
// Sending nil dictionary is perfectly fine.
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
- var dst tokens
bw := bitWriterPool.Get().(*huffmanBitWriter)
bw.reset(out)
defer func() {
@@ -91,6 +97,12 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
var inDict []byte
+ dst := tokensPool.Get().(*tokens)
+ dst.Reset()
+ defer func() {
+ tokensPool.Put(dst)
+ }()
+
for len(in) > 0 {
todo := in
if len(inDict) > 0 {
@@ -113,9 +125,9 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
}
// Compress
if len(inDict) == 0 {
- statelessEnc(&dst, todo, int16(len(dict)))
+ statelessEnc(dst, todo, int16(len(dict)))
} else {
- statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
+ statelessEnc(dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
}
isEof := eof && len(in) == 0
@@ -129,7 +141,7 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
// If we removed less than 1/16th, huffman compress the block.
bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
} else {
- bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ bw.writeBlockDynamic(dst, isEof, uncompressed, len(in) == 0)
}
if len(in) > 0 {
// Retain a dict if we have more
diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/.gitignore
new file mode 100644
index 00000000000..c92c4d56084
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/.gitignore
@@ -0,0 +1,29 @@
+#### joe made this: http://goel.io/joe
+
+#####=== Go ===#####
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml
new file mode 100644
index 00000000000..43eb762fa34
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+sudo: false
+go:
+ - 1.10.x
+install:
+ - go get -v github.com/golang/lint/golint
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - go get -d -t -v ./...
+ - go build -v ./...
+script:
+ - go vet ./...
+ - $HOME/gopath/bin/golint .
+ - go test -v -race ./...
+ - go test -v -covermode=count -coverprofile=cov.out
+ - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true
diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/AUTHORS.md
new file mode 100644
index 00000000000..95581c78b06
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/AUTHORS.md
@@ -0,0 +1,2 @@
+- Peter Bourgon (@peterbourgon)
+- Tomás Senart (@tsenart)
diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/CHANGELOG.md
new file mode 100644
index 00000000000..8da38c6b00d
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/CHANGELOG.md
@@ -0,0 +1,33 @@
+## 1.3.1 / 2018-10-02
+
+* Use underlying entropy source for random increments in Monotonic (#32)
+
+## 1.3.0 / 2018-09-29
+
+* Monotonic entropy support (#31)
+
+## 1.2.0 / 2018-09-09
+
+* Add a function to convert Unix time in milliseconds back to time.Time (#30)
+
+## 1.1.0 / 2018-08-15
+
+* Ensure random part is always read from the entropy reader in full (#28)
+
+## 1.0.0 / 2018-07-29
+
+* Add ParseStrict and MustParseStrict functions (#26)
+* Enforce overflow checking when parsing (#20)
+
+## 0.3.0 / 2017-01-03
+
+* Implement ULID.Compare method
+
+## 0.2.0 / 2016-12-13
+
+* Remove year 2262 Timestamp bug. (#1)
+* Gracefully handle invalid encodings when parsing.
+
+## 0.1.0 / 2016-12-06
+
+* First ULID release
diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/CONTRIBUTING.md
new file mode 100644
index 00000000000..68f03f26eba
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+# Contributing
+
+We use GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first propose your ideas
+ in a GitHub issue. This will avoid unnecessary work and surely give
+ you and us a good deal of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock
new file mode 100644
index 00000000000..349b449a6ea
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/Gopkg.lock
@@ -0,0 +1,15 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ branch = "master"
+ name = "github.com/pborman/getopt"
+ packages = ["v2"]
+ revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml
new file mode 100644
index 00000000000..624a7a019c7
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/Gopkg.toml
@@ -0,0 +1,26 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+
+[[constraint]]
+ branch = "master"
+ name = "github.com/pborman/getopt"
diff --git a/vendor/github.com/oklog/ulid/LICENSE b/vendor/github.com/oklog/ulid/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/README.md
new file mode 100644
index 00000000000..0a3d2f82b25
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/README.md
@@ -0,0 +1,150 @@
+# Universally Unique Lexicographically Sortable Identifier
+
+
+[Build Status](http://travis-ci.org/oklog/ulid)
+[Go Report Card](https://goreportcard.com/report/oklog/ulid)
+[Coverage Status](https://coveralls.io/github/oklog/ulid?branch=master)
+[GoDoc](https://godoc.org/github.com/oklog/ulid)
+[Apache 2 License](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE)
+
+A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented.
+
+## Background
+
+A GUID/UUID can be suboptimal for many use-cases because:
+
+- It isn't the most character efficient way of encoding 128 bits
+- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address
+- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures
+- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures
+
+A ULID however:
+
+- Is compatible with UUIDs/GUIDs
+- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact)
+- Lexicographically sortable
+- Canonically encoded as a 26 character string, as opposed to the 36 character UUID
+- Uses Crockford's base32 for better efficiency and readability (5 bits per character)
+- Case insensitive
+- No special characters (URL safe)
+- Monotonic sort order (correctly detects and handles the same millisecond)
+
+## Install
+
+```shell
+go get github.com/oklog/ulid
+```
+
+## Usage
+
+An ULID is constructed with a `time.Time` and an `io.Reader` entropy source.
+This design allows for greater flexibility in choosing your trade-offs.
+
+Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use.
+Instantiate one per long-lived goroutine, or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source`, as is frequently observed with the package-level functions.
+
+
+```go
+func ExampleULID() {
+ t := time.Unix(1000000, 0)
+ entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
+ fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy))
+ // Output: 0000XSNJG0MQJHBF4QX1EFD6Y3
+}
+
+```
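+
+For highly concurrent use, a minimal sketch (assuming per-goroutine
+monotonicity is sufficient; `newULID` is a name chosen here, not part of the
+package):
+
+```go
+var entropyPool = sync.Pool{
+	New: func() interface{} {
+		// One seeded source per pooled reader avoids lock contention.
+		return ulid.Monotonic(rand.New(rand.NewSource(time.Now().UnixNano())), 0)
+	},
+}
+
+func newULID() ulid.ULID {
+	e := entropyPool.Get().(io.Reader)
+	defer entropyPool.Put(e)
+	return ulid.MustNew(ulid.Timestamp(time.Now()), e)
+}
+```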
+
+## Specification
+
+Below is the current specification of ULID as implemented in this repository.
+
+### Components
+
+**Timestamp**
+- 48 bits
+- UNIX-time in milliseconds
+- Won't run out of space till the year 10889 AD
+
+**Entropy**
+- 80 bits
+- User-defined entropy source.
+- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic)
+
+### Encoding
+
+[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown.
+This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse.
+
+```
+0123456789ABCDEFGHJKMNPQRSTVWXYZ
+```
+
+### Binary Layout and Byte Order
+
+The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order).
+
+```
+0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 32_bit_uint_time_high |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 16_bit_uint_time_low | 16_bit_uint_random |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 32_bit_uint_random |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 32_bit_uint_random |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+```
+
+### String Representation
+
+```
+ 01AN4Z07BY 79KA1307SR9X4MV3
+|----------| |----------------|
+ Timestamp Entropy
+ 10 chars 16 chars
+ 48bits 80bits
+ base32 base32
+```
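+
+The embedded timestamp can be recovered from either form; a small sketch:
+
+```go
+id := ulid.MustParse("01AN4Z07BY79KA1307SR9X4MV3")
+fmt.Println(ulid.Time(id.Time())) // millisecond-precision time.Time
+```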
+
+## Test
+
+```shell
+go test ./...
+```
+
+## Benchmarks
+
+On an Intel Core i7 Ivy Bridge 2.7 GHz, macOS 10.12.1 and Go 1.8.0beta1
+
+```
+BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op
+BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op
+BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op
+BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op
+BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op
+BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op
+BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op
+BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op
+BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op
+BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op
+BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op
+BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op
+BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op
+BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op
+BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op
+BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op
+BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op
+BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op
+BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op
+BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op
+BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op
+BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op
+```
+
+## Prior Art
+
+- [alizain/ulid](https://github.com/alizain/ulid)
+- [RobThree/NUlid](https://github.com/RobThree/NUlid)
+- [imdario/go-ulid](https://github.com/imdario/go-ulid)
diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/ulid.go
new file mode 100644
index 00000000000..c5d0d66fd2a
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/ulid.go
@@ -0,0 +1,614 @@
+// Copyright 2016 The Oklog Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ulid
+
+import (
+ "bufio"
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "io"
+ "math"
+ "math/bits"
+ "math/rand"
+ "time"
+)
+
+/*
+An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier
+
+ The components are encoded as 16 octets.
+ Each component is encoded with the MSB first (network byte order).
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 32_bit_uint_time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 16_bit_uint_time_low | 16_bit_uint_random |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 32_bit_uint_random |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 32_bit_uint_random |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+type ULID [16]byte
+
+var (
+ // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong
+ // data size.
+ ErrDataSize = errors.New("ulid: bad data size when unmarshaling")
+
+ // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with
+ // invalid Base32 encodings.
+ ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling")
+
+ // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient
+ // size.
+ ErrBufferSize = errors.New("ulid: bad buffer size when marshaling")
+
+ // ErrBigTime is returned when constructing an ULID with a time that is larger
+ // than MaxTime.
+ ErrBigTime = errors.New("ulid: time too big")
+
+ // ErrOverflow is returned when unmarshaling a ULID whose first character is
+ // larger than 7, thereby exceeding the valid bit depth of 128.
+ ErrOverflow = errors.New("ulid: overflow when unmarshaling")
+
+ // ErrMonotonicOverflow is returned by a Monotonic entropy source when
+ // incrementing the previous ULID's entropy bytes would result in overflow.
+ ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow")
+
+ // ErrScanValue is returned when the value passed to scan cannot be unmarshaled
+ // into the ULID.
+ ErrScanValue = errors.New("ulid: source value must be a string or byte slice")
+)
+
+// New returns an ULID with the given Unix milliseconds timestamp and an
+// optional entropy source. Use the Timestamp function to convert
+// a time.Time to Unix milliseconds.
+//
+// ErrBigTime is returned when passing a timestamp bigger than MaxTime.
+// Reading from the entropy source may also return an error.
+func New(ms uint64, entropy io.Reader) (id ULID, err error) {
+ if err = id.SetTime(ms); err != nil {
+ return id, err
+ }
+
+ switch e := entropy.(type) {
+ case nil:
+ return id, err
+ case *monotonic:
+ err = e.MonotonicRead(ms, id[6:])
+ default:
+ _, err = io.ReadFull(e, id[6:])
+ }
+
+ return id, err
+}
+
+// MustNew is a convenience function equivalent to New that panics on failure
+// instead of returning an error.
+func MustNew(ms uint64, entropy io.Reader) ULID {
+ id, err := New(ms, entropy)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
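+
+// For illustration, a typical construction (entropy setup as in the README):
+//
+//	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
+//	id := MustNew(Timestamp(time.Now()), entropy)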
+
+// Parse parses an encoded ULID, returning an error in case of failure.
+//
+// ErrDataSize is returned if the len(ulid) is different from an encoded
+// ULID's length. Invalid encodings produce undefined ULIDs. For a version that
+// returns an error instead, see ParseStrict.
+func Parse(ulid string) (id ULID, err error) {
+ return id, parse([]byte(ulid), false, &id)
+}
+
+// ParseStrict parses an encoded ULID, returning an error in case of failure.
+//
+// It is like Parse, but additionally validates that the parsed ULID consists
+// only of valid base32 characters. It is slightly slower than Parse.
+//
+// ErrDataSize is returned if the len(ulid) is different from an encoded
+// ULID's length. Invalid encodings return ErrInvalidCharacters.
+func ParseStrict(ulid string) (id ULID, err error) {
+ return id, parse([]byte(ulid), true, &id)
+}
+
+func parse(v []byte, strict bool, id *ULID) error {
+ // Check if a base32 encoded ULID is the right length.
+ if len(v) != EncodedSize {
+ return ErrDataSize
+ }
+
+ // Check if all the characters in a base32 encoded ULID are part of the
+ // expected base32 character set.
+ if strict &&
+ (dec[v[0]] == 0xFF ||
+ dec[v[1]] == 0xFF ||
+ dec[v[2]] == 0xFF ||
+ dec[v[3]] == 0xFF ||
+ dec[v[4]] == 0xFF ||
+ dec[v[5]] == 0xFF ||
+ dec[v[6]] == 0xFF ||
+ dec[v[7]] == 0xFF ||
+ dec[v[8]] == 0xFF ||
+ dec[v[9]] == 0xFF ||
+ dec[v[10]] == 0xFF ||
+ dec[v[11]] == 0xFF ||
+ dec[v[12]] == 0xFF ||
+ dec[v[13]] == 0xFF ||
+ dec[v[14]] == 0xFF ||
+ dec[v[15]] == 0xFF ||
+ dec[v[16]] == 0xFF ||
+ dec[v[17]] == 0xFF ||
+ dec[v[18]] == 0xFF ||
+ dec[v[19]] == 0xFF ||
+ dec[v[20]] == 0xFF ||
+ dec[v[21]] == 0xFF ||
+ dec[v[22]] == 0xFF ||
+ dec[v[23]] == 0xFF ||
+ dec[v[24]] == 0xFF ||
+ dec[v[25]] == 0xFF) {
+ return ErrInvalidCharacters
+ }
+
+ // Check if the first character in a base32 encoded ULID will overflow. This
+ // happens because the base32 representation encodes 130 bits, while the
+ // ULID is only 128 bits.
+ //
+ // See https://github.com/oklog/ulid/issues/9 for details.
+ if v[0] > '7' {
+ return ErrOverflow
+ }
+
+ // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid)
+ // to decode a base32 ULID.
+
+ // 6 bytes timestamp (48 bits)
+ (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]])
+ (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2))
+ (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4))
+ (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1))
+ (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3))
+ (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]])
+
+ // 10 bytes of entropy (80 bits)
+ (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2))
+ (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4))
+ (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1))
+ (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3))
+ (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]])
+ (*id)[11] = ((dec[v[18]] << 3) | (dec[v[19]] >> 2))
+ (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4))
+ (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1))
+ (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3))
+ (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]])
+
+ return nil
+}
+
+// MustParse is a convenience function equivalent to Parse that panics on failure
+// instead of returning an error.
+func MustParse(ulid string) ULID {
+ id, err := Parse(ulid)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// MustParseStrict is a convenience function equivalent to ParseStrict that
+// panics on failure instead of returning an error.
+func MustParseStrict(ulid string) ULID {
+ id, err := ParseStrict(ulid)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
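+
+// For illustration, the difference between the two parsers ('U' is not in
+// the Base32 alphabet used here):
+//
+//	id, _ := Parse("0000XSNJG0MQJHBF4QX1EFD6YU")        // undefined ULID, nil error
+//	_, err := ParseStrict("0000XSNJG0MQJHBF4QX1EFD6YU") // ErrInvalidCharacters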
+
+// String returns a lexicographically sortable string encoded ULID
+// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3
+// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy
+func (id ULID) String() string {
+ ulid := make([]byte, EncodedSize)
+ _ = id.MarshalTextTo(ulid)
+ return string(ulid)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface by
+// returning the ULID as a byte slice.
+func (id ULID) MarshalBinary() ([]byte, error) {
+ ulid := make([]byte, len(id))
+ return ulid, id.MarshalBinaryTo(ulid)
+}
+
+// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer.
+// ErrBufferSize is returned when the len(dst) != 16.
+func (id ULID) MarshalBinaryTo(dst []byte) error {
+ if len(dst) != len(id) {
+ return ErrBufferSize
+ }
+
+ copy(dst, id[:])
+ return nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by
+// copying the passed data and converting it to an ULID. ErrDataSize is
+// returned if the data length is different from ULID length.
+func (id *ULID) UnmarshalBinary(data []byte) error {
+ if len(data) != len(*id) {
+ return ErrDataSize
+ }
+
+ copy((*id)[:], data)
+ return nil
+}
+
+// Encoding is the base 32 encoding alphabet used in ULID strings.
+const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
+
+// MarshalText implements the encoding.TextMarshaler interface by
+// returning the string encoded ULID.
+func (id ULID) MarshalText() ([]byte, error) {
+ ulid := make([]byte, EncodedSize)
+ return ulid, id.MarshalTextTo(ulid)
+}
+
+// MarshalTextTo writes the ULID as a string to the given buffer.
+// ErrBufferSize is returned when the len(dst) != 26.
+func (id ULID) MarshalTextTo(dst []byte) error {
+ // Optimized unrolled loop ahead.
+ // From https://github.com/RobThree/NUlid
+
+ if len(dst) != EncodedSize {
+ return ErrBufferSize
+ }
+
+	// 10 characters of timestamp (6 bytes)
+ dst[0] = Encoding[(id[0]&224)>>5]
+ dst[1] = Encoding[id[0]&31]
+ dst[2] = Encoding[(id[1]&248)>>3]
+ dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)]
+ dst[4] = Encoding[(id[2]&62)>>1]
+ dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)]
+ dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)]
+ dst[7] = Encoding[(id[4]&124)>>2]
+ dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)]
+ dst[9] = Encoding[id[5]&31]
+
+	// 16 characters of entropy (10 bytes)
+ dst[10] = Encoding[(id[6]&248)>>3]
+ dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)]
+ dst[12] = Encoding[(id[7]&62)>>1]
+ dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)]
+ dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)]
+ dst[15] = Encoding[(id[9]&124)>>2]
+ dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)]
+ dst[17] = Encoding[id[10]&31]
+ dst[18] = Encoding[(id[11]&248)>>3]
+ dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)]
+ dst[20] = Encoding[(id[12]&62)>>1]
+ dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)]
+ dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)]
+ dst[23] = Encoding[(id[14]&124)>>2]
+ dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)]
+ dst[25] = Encoding[id[15]&31]
+
+ return nil
+}
+
+// Byte to index table for O(1) lookups when unmarshaling.
+// We use 0xFF as sentinel value for invalid indexes.
+var dec = [...]byte{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
+ 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
+ 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
+ 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
+ 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
+ 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+}
+
+// EncodedSize is the length of a text encoded ULID.
+const EncodedSize = 26
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface by
+// parsing the data as string encoded ULID.
+//
+// ErrDataSize is returned if the len(v) is different from an encoded
+// ULID's length. Invalid encodings produce undefined ULIDs.
+func (id *ULID) UnmarshalText(v []byte) error {
+ return parse(v, false, id)
+}
+
+// Time returns the Unix time in milliseconds encoded in the ULID.
+// Use the top level Time function to convert the returned value to
+// a time.Time.
+func (id ULID) Time() uint64 {
+ return uint64(id[5]) | uint64(id[4])<<8 |
+ uint64(id[3])<<16 | uint64(id[2])<<24 |
+ uint64(id[1])<<32 | uint64(id[0])<<40
+}
+
+// maxTime is the maximum Unix time in milliseconds that can be
+// represented in an ULID.
+var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time()
+
+// MaxTime returns the maximum Unix time in milliseconds that
+// can be encoded in an ULID.
+func MaxTime() uint64 { return maxTime }
+
+// Now is a convenience function that returns the current
+// UTC time in Unix milliseconds. Equivalent to:
+// Timestamp(time.Now().UTC())
+func Now() uint64 { return Timestamp(time.Now().UTC()) }
+
+// Timestamp converts a time.Time to Unix milliseconds.
+//
+// Because of the way ULID stores time, times from the year
+// 10889 onward produce undefined results.
+func Timestamp(t time.Time) uint64 {
+ return uint64(t.Unix())*1000 +
+ uint64(t.Nanosecond()/int(time.Millisecond))
+}
+
+// Time converts Unix milliseconds in the format
+// returned by the Timestamp function to a time.Time.
+func Time(ms uint64) time.Time {
+ s := int64(ms / 1e3)
+ ns := int64((ms % 1e3) * 1e6)
+ return time.Unix(s, ns)
+}
+
+// SetTime sets the time component of the ULID to the given Unix time
+// in milliseconds.
+func (id *ULID) SetTime(ms uint64) error {
+ if ms > maxTime {
+ return ErrBigTime
+ }
+
+ (*id)[0] = byte(ms >> 40)
+ (*id)[1] = byte(ms >> 32)
+ (*id)[2] = byte(ms >> 24)
+ (*id)[3] = byte(ms >> 16)
+ (*id)[4] = byte(ms >> 8)
+ (*id)[5] = byte(ms)
+
+ return nil
+}
+
+// Entropy returns the entropy from the ULID.
+func (id ULID) Entropy() []byte {
+ e := make([]byte, 10)
+ copy(e, id[6:])
+ return e
+}
+
+// SetEntropy sets the ULID entropy to the passed byte slice.
+// ErrDataSize is returned if len(e) != 10.
+func (id *ULID) SetEntropy(e []byte) error {
+ if len(e) != 10 {
+ return ErrDataSize
+ }
+
+ copy((*id)[6:], e)
+ return nil
+}
+
+// Compare returns an integer comparing id and other lexicographically.
+// The result will be 0 if id==other, -1 if id < other, and +1 if id > other.
+func (id ULID) Compare(other ULID) int {
+ return bytes.Compare(id[:], other[:])
+}
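
As a rough usage sketch (again assuming the `github.com/oklog/ulid` import path), the accessors above compose like this: the timestamp occupies the six most significant bytes, so ULIDs order by time first and entropy second.

```go
package main

import (
	"fmt"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	var a, b ulid.ULID

	t := time.Now()
	_ = a.SetTime(ulid.Timestamp(t))
	_ = b.SetTime(ulid.Timestamp(t.Add(time.Second)))

	// Give a some entropy; b keeps zero entropy.
	_ = a.SetEntropy([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})

	// a still sorts before b: the timestamp dominates the comparison.
	fmt.Println(a.Compare(b) < 0) // true

	// The stored time round-trips at millisecond precision.
	fmt.Println(ulid.Time(a.Time()).UnixMilli() == t.UnixMilli()) // true
}
```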
+
+// Scan implements the sql.Scanner interface. It supports scanning
+// a string or byte slice.
+func (id *ULID) Scan(src interface{}) error {
+ switch x := src.(type) {
+ case nil:
+ return nil
+ case string:
+ return id.UnmarshalText([]byte(x))
+ case []byte:
+ return id.UnmarshalBinary(x)
+ }
+
+ return ErrScanValue
+}
+
+// Value implements the sql/driver.Valuer interface. This returns the value
+// represented as a byte slice. If instead a string is desirable, a wrapper
+// type can be created that calls String().
+//
+// // stringValuer wraps a ULID as a string-based driver.Valuer.
+// type stringValuer ULID
+//
+// func (id stringValuer) Value() (driver.Value, error) {
+// return ULID(id).String(), nil
+// }
+//
+// // Example usage.
+// db.Exec("...", stringValuer(id))
+func (id ULID) Value() (driver.Value, error) {
+ return id.MarshalBinary()
+}
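
A small round trip through the sql interfaces, without a real database; this is a sketch, not prescribed usage:

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	id := ulid.MustParse("01AN4Z07BY79KA1307SR9X4MV3")

	// Value yields the 16-byte binary form (e.g. for a BINARY(16) column).
	v, _ := id.Value()

	// Scan accepts either the binary form ([]byte) or the text form (string).
	var fromBytes, fromText ulid.ULID
	_ = fromBytes.Scan(v)
	_ = fromText.Scan(id.String())

	fmt.Println(fromBytes == id, fromText == id) // true true
}
```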
+
+// Monotonic returns an entropy source that is guaranteed to yield
+// strictly increasing entropy bytes for the same ULID timestamp.
+// On conflicts, the previous ULID entropy is incremented with a
+// random number between 1 and `inc` (inclusive).
+//
+// The provided entropy source must actually yield random bytes or else
+// monotonic reads are not guaranteed to terminate, since there isn't
+// enough randomness to compute an increment number.
+//
+// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`.
+// The lower the value of `inc`, the easier the next ULID within the
+// same millisecond is to guess. If your code depends on ULIDs having
+// secure entropy bytes, then don't go under this default unless you know
+// what you're doing.
+//
+// The returned io.Reader isn't safe for concurrent use.
+func Monotonic(entropy io.Reader, inc uint64) io.Reader {
+ m := monotonic{
+ Reader: bufio.NewReader(entropy),
+ inc: inc,
+ }
+
+ if m.inc == 0 {
+ m.inc = math.MaxUint32
+ }
+
+ if rng, ok := entropy.(*rand.Rand); ok {
+ m.rng = rng
+ }
+
+ return &m
+}
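
A sketch of typical monotonic usage. It assumes `ulid.MustNew`, which is defined earlier in this package (outside this hunk) and draws entropy from the given reader for a given timestamp:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	t := time.Now()
	// inc == 0 selects the secure default increment (math.MaxUint32).
	entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)

	// Repeated calls within the same millisecond yield strictly
	// increasing ULIDs instead of random, possibly-colliding ones.
	for i := 0; i < 3; i++ {
		fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy))
	}
}
```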
+
+type monotonic struct {
+ io.Reader
+ ms uint64
+ inc uint64
+ entropy uint80
+ rand [8]byte
+ rng *rand.Rand
+}
+
+func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) {
+ if !m.entropy.IsZero() && m.ms == ms {
+ err = m.increment()
+ m.entropy.AppendTo(entropy)
+ } else if _, err = io.ReadFull(m.Reader, entropy); err == nil {
+ m.ms = ms
+ m.entropy.SetBytes(entropy)
+ }
+ return err
+}
+
+// increment the previous entropy number with a random number
+// of up to m.inc (inclusive).
+func (m *monotonic) increment() error {
+ if inc, err := m.random(); err != nil {
+ return err
+ } else if m.entropy.Add(inc) {
+ return ErrMonotonicOverflow
+ }
+ return nil
+}
+
+// random returns a uniform random value in [1, m.inc), reading entropy
+// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1.
+// Adapted from: https://golang.org/pkg/crypto/rand/#Int
+func (m *monotonic) random() (inc uint64, err error) {
+ if m.inc <= 1 {
+ return 1, nil
+ }
+
+	// Fast path for using an underlying rand.Rand directly.
+ if m.rng != nil {
+ // Range: [1, m.inc)
+ return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil
+ }
+
+ // bitLen is the maximum bit length needed to encode a value < m.inc.
+ bitLen := bits.Len64(m.inc)
+
+ // byteLen is the maximum byte length needed to encode a value < m.inc.
+ byteLen := uint(bitLen+7) / 8
+
+ // msbitLen is the number of bits in the most significant byte of m.inc-1.
+ msbitLen := uint(bitLen % 8)
+ if msbitLen == 0 {
+ msbitLen = 8
+ }
+
+ for inc == 0 || inc >= m.inc {
+ if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil {
+ return 0, err
+ }
+
+ // Clear bits in the first byte to increase the probability
+ // that the candidate is < m.inc.
+		m.rand[0] &= uint8(int(1<<msbitLen) - 1)
+	if ev.threshold <= 0 || ev.threshold > len(ev.providers) {
+ return nil, errors.New("invalid threshold")
+ }
+
+ if len(usedKeyids) < ev.threshold {
+ return acceptedKeys, fmt.Errorf("accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold)
+ }
+
+ return acceptedKeys, nil
+}
+
+func NewEnvelopeVerifier(v ...Verifier) (*EnvelopeVerifier, error) {
+ return NewMultiEnvelopeVerifier(1, v...)
+}
+
+func NewMultiEnvelopeVerifier(threshold int, p ...Verifier) (*EnvelopeVerifier, error) {
+ if threshold <= 0 || threshold > len(p) {
+ return nil, errors.New("invalid threshold")
+ }
+
+ ev := EnvelopeVerifier{
+ providers: p,
+ threshold: threshold,
+ }
+
+ return &ev, nil
+}
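
A hedged sketch of constructing a threshold verifier. It assumes the package's `Verifier` interface (defined earlier in this file, outside this hunk, with Verify/KeyID/Public methods) and uses the ed25519 signerverifier from the sibling package as the concrete implementation:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/hex"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/dsse"
	"github.com/secure-systems-lab/go-securesystemslib/signerverifier"
)

func newVerifier() dsse.Verifier {
	pub, _, _ := ed25519.GenerateKey(rand.Reader)
	sv, _ := signerverifier.NewED25519SignerVerifierFromSSLibKey(&signerverifier.SSLibKey{
		KeyType: signerverifier.ED25519KeyType,
		Scheme:  signerverifier.ED25519KeyType,
		KeyVal:  signerverifier.KeyVal{Public: hex.EncodeToString(pub)},
	})
	return sv
}

func main() {
	v1, v2 := newVerifier(), newVerifier()

	// Require both keys to have produced an accepted signature.
	ev, err := dsse.NewMultiEnvelopeVerifier(2, v1, v2)
	fmt.Println(ev != nil, err) // true <nil>

	// A threshold above the number of verifiers is rejected.
	_, err = dsse.NewMultiEnvelopeVerifier(3, v1, v2)
	fmt.Println(err) // invalid threshold
}
```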
+
+func SHA256KeyID(pub crypto.PublicKey) (string, error) {
+ // Generate public key fingerprint
+ sshpk, err := ssh.NewPublicKey(pub)
+ if err != nil {
+ return "", err
+ }
+ fingerprint := ssh.FingerprintSHA256(sshpk)
+ return fingerprint, nil
+}
+
+func removeIndex(v []Verifier, index int) []Verifier {
+ return append(v[:index], v[index+1:]...)
+}
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go
new file mode 100644
index 00000000000..691091af99a
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go
@@ -0,0 +1,119 @@
+package signerverifier
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "os"
+)
+
+const (
+ ECDSAKeyType = "ecdsa"
+ ECDSAKeyScheme = "ecdsa-sha2-nistp256"
+)
+
+// ECDSASignerVerifier is a dsse.SignerVerifier compliant interface to sign and
+// verify signatures using ECDSA keys.
+type ECDSASignerVerifier struct {
+ keyID string
+ curveSize int
+ private *ecdsa.PrivateKey
+ public *ecdsa.PublicKey
+}
+
+// NewECDSASignerVerifierFromSSLibKey creates an ECDSASignerVerifier from an
+// SSLibKey.
+func NewECDSASignerVerifierFromSSLibKey(key *SSLibKey) (*ECDSASignerVerifier, error) {
+ if len(key.KeyVal.Public) == 0 {
+ return nil, ErrInvalidKey
+ }
+
+ _, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public))
+ if err != nil {
+ return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err)
+ }
+
+ sv := &ECDSASignerVerifier{
+ keyID: key.KeyID,
+ curveSize: publicParsedKey.(*ecdsa.PublicKey).Params().BitSize,
+ public: publicParsedKey.(*ecdsa.PublicKey),
+ private: nil,
+ }
+
+ if len(key.KeyVal.Private) > 0 {
+ _, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private))
+ if err != nil {
+ return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err)
+ }
+
+ sv.private = privateParsedKey.(*ecdsa.PrivateKey)
+ }
+
+ return sv, nil
+}
+
+// Sign creates a signature for `data`.
+func (sv *ECDSASignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) {
+ if sv.private == nil {
+ return nil, ErrNotPrivateKey
+ }
+
+ hashedData := getECDSAHashedData(data, sv.curveSize)
+
+ return ecdsa.SignASN1(rand.Reader, sv.private, hashedData)
+}
+
+// Verify verifies the `sig` value passed in against `data`.
+func (sv *ECDSASignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error {
+ hashedData := getECDSAHashedData(data, sv.curveSize)
+
+ if ok := ecdsa.VerifyASN1(sv.public, hashedData, sig); !ok {
+ return ErrSignatureVerificationFailed
+ }
+
+ return nil
+}
+
+// KeyID returns the identifier of the key used to create the
+// ECDSASignerVerifier instance.
+func (sv *ECDSASignerVerifier) KeyID() (string, error) {
+ return sv.keyID, nil
+}
+
+// Public returns the public portion of the key used to create the
+// ECDSASignerVerifier instance.
+func (sv *ECDSASignerVerifier) Public() crypto.PublicKey {
+ return sv.public
+}
+
+// LoadECDSAKeyFromFile returns an SSLibKey instance for an ECDSA key stored in
+// a file in the custom securesystemslib format.
+//
+// Deprecated: use LoadKey(). The custom serialization format has been
+// deprecated. Use
+// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py
+// to convert your key.
+func LoadECDSAKeyFromFile(path string) (*SSLibKey, error) {
+ contents, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load ECDSA key from file: %w", err)
+ }
+
+ return LoadKeyFromSSLibBytes(contents)
+}
+
+func getECDSAHashedData(data []byte, curveSize int) []byte {
+ switch {
+ case curveSize <= 256:
+ return hashBeforeSigning(data, sha256.New())
+ case 256 < curveSize && curveSize <= 384:
+ return hashBeforeSigning(data, sha512.New384())
+ case curveSize > 384:
+ return hashBeforeSigning(data, sha512.New())
+ }
+ return []byte{}
+}
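
For reference, a self-contained sketch of the ECDSA signerverifier round trip. It generates a P-256 key in memory and feeds PEM-encoded halves into the SSLibKey fields; with a 256-bit curve, `getECDSAHashedData` selects SHA-256:

```go
package main

import (
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/signerverifier"
)

func main() {
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	pubDER, _ := x509.MarshalPKIXPublicKey(&priv.PublicKey)
	privDER, _ := x509.MarshalPKCS8PrivateKey(priv)

	key := &signerverifier.SSLibKey{
		KeyType: signerverifier.ECDSAKeyType,
		Scheme:  signerverifier.ECDSAKeyScheme,
		KeyVal: signerverifier.KeyVal{
			Public:  string(pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pubDER})),
			Private: string(pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: privDER})),
		},
	}

	sv, err := signerverifier.NewECDSASignerVerifierFromSSLibKey(key)
	if err != nil {
		panic(err)
	}

	sig, _ := sv.Sign(context.Background(), []byte("payload"))
	fmt.Println(sv.Verify(context.Background(), []byte("payload"), sig)) // <nil>
}
```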
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go
new file mode 100644
index 00000000000..d954e14b749
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go
@@ -0,0 +1,103 @@
+package signerverifier
+
+import (
+ "context"
+ "crypto"
+ "crypto/ed25519"
+ "encoding/hex"
+ "fmt"
+ "os"
+)
+
+const ED25519KeyType = "ed25519"
+
+// ED25519SignerVerifier is a dsse.SignerVerifier compliant interface to sign
+// and verify signatures using ED25519 keys.
+type ED25519SignerVerifier struct {
+ keyID string
+ private ed25519.PrivateKey
+ public ed25519.PublicKey
+}
+
+// NewED25519SignerVerifierFromSSLibKey creates an Ed25519SignerVerifier from an
+// SSLibKey.
+func NewED25519SignerVerifierFromSSLibKey(key *SSLibKey) (*ED25519SignerVerifier, error) {
+ if len(key.KeyVal.Public) == 0 {
+ return nil, ErrInvalidKey
+ }
+
+ public, err := hex.DecodeString(key.KeyVal.Public)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err)
+ }
+
+ var private []byte
+ if len(key.KeyVal.Private) > 0 {
+ private, err = hex.DecodeString(key.KeyVal.Private)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err)
+ }
+
+ // python-securesystemslib provides an interface to generate ed25519
+ // keys but it differs slightly in how it serializes the key to disk.
+ // Specifically, the keyval.private field includes _only_ the private
+ // portion of the key while libraries such as crypto/ed25519 also expect
+ // the public portion. So, if the private portion is half of what we
+ // expect, we append the public portion as well.
+ if len(private) == ed25519.PrivateKeySize/2 {
+ private = append(private, public...)
+ }
+ }
+
+ return &ED25519SignerVerifier{
+ keyID: key.KeyID,
+ public: ed25519.PublicKey(public),
+ private: ed25519.PrivateKey(private),
+ }, nil
+}
+
+// Sign creates a signature for `data`.
+func (sv *ED25519SignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) {
+ if len(sv.private) == 0 {
+ return nil, ErrNotPrivateKey
+ }
+
+ signature := ed25519.Sign(sv.private, data)
+ return signature, nil
+}
+
+// Verify verifies the `sig` value passed in against `data`.
+func (sv *ED25519SignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error {
+ if ok := ed25519.Verify(sv.public, data, sig); ok {
+ return nil
+ }
+ return ErrSignatureVerificationFailed
+}
+
+// KeyID returns the identifier of the key used to create the
+// ED25519SignerVerifier instance.
+func (sv *ED25519SignerVerifier) KeyID() (string, error) {
+ return sv.keyID, nil
+}
+
+// Public returns the public portion of the key used to create the
+// ED25519SignerVerifier instance.
+func (sv *ED25519SignerVerifier) Public() crypto.PublicKey {
+ return sv.public
+}
+
+// LoadED25519KeyFromFile returns an SSLibKey instance for an ED25519 key stored
+// in a file in the custom securesystemslib format.
+//
+// Deprecated: use LoadKey(). The custom serialization format has been
+// deprecated. Use
+// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py
+// to convert your key.
+func LoadED25519KeyFromFile(path string) (*SSLibKey, error) {
+ contents, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load ED25519 key from file: %w", err)
+ }
+
+ return LoadKeyFromSSLibBytes(contents)
+}
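
A sketch of the ed25519 round trip, including the seed-only private key case that the constructor compensates for. Key material here is generated in memory and hex-encoded, as the securesystemslib format expects:

```go
package main

import (
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"encoding/hex"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/signerverifier"
)

func main() {
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)

	// Store only the 32-byte seed half: the constructor detects this
	// and appends the public half to form a full private key.
	key := &signerverifier.SSLibKey{
		KeyType: signerverifier.ED25519KeyType,
		Scheme:  signerverifier.ED25519KeyType,
		KeyVal: signerverifier.KeyVal{
			Public:  hex.EncodeToString(pub),
			Private: hex.EncodeToString(priv.Seed()),
		},
	}

	sv, err := signerverifier.NewED25519SignerVerifierFromSSLibKey(key)
	if err != nil {
		panic(err)
	}

	sig, _ := sv.Sign(context.Background(), []byte("payload"))
	fmt.Println(sv.Verify(context.Background(), []byte("payload"), sig)) // <nil>
}
```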
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go
new file mode 100644
index 00000000000..2abfcb27c4b
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go
@@ -0,0 +1,170 @@
+package signerverifier
+
+import (
+ "context"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "fmt"
+ "os"
+ "strings"
+)
+
+const (
+ RSAKeyType = "rsa"
+ RSAKeyScheme = "rsassa-pss-sha256"
+ RSAPrivateKeyPEM = "RSA PRIVATE KEY"
+)
+
+// RSAPSSSignerVerifier is a dsse.SignerVerifier compliant interface to sign and
+// verify signatures using RSA keys following the RSA-PSS scheme.
+type RSAPSSSignerVerifier struct {
+ keyID string
+ private *rsa.PrivateKey
+ public *rsa.PublicKey
+}
+
+// NewRSAPSSSignerVerifierFromSSLibKey creates an RSAPSSSignerVerifier from an
+// SSLibKey.
+func NewRSAPSSSignerVerifierFromSSLibKey(key *SSLibKey) (*RSAPSSSignerVerifier, error) {
+ if len(key.KeyVal.Public) == 0 {
+ return nil, ErrInvalidKey
+ }
+
+ _, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public))
+ if err != nil {
+ return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err)
+ }
+
+ if len(key.KeyVal.Private) > 0 {
+ _, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private))
+ if err != nil {
+ return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err)
+ }
+
+ return &RSAPSSSignerVerifier{
+ keyID: key.KeyID,
+ public: publicParsedKey.(*rsa.PublicKey),
+ private: privateParsedKey.(*rsa.PrivateKey),
+ }, nil
+ }
+
+ return &RSAPSSSignerVerifier{
+ keyID: key.KeyID,
+ public: publicParsedKey.(*rsa.PublicKey),
+ private: nil,
+ }, nil
+}
+
+// Sign creates a signature for `data`.
+func (sv *RSAPSSSignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) {
+ if sv.private == nil {
+ return nil, ErrNotPrivateKey
+ }
+
+ hashedData := hashBeforeSigning(data, sha256.New())
+
+ return rsa.SignPSS(rand.Reader, sv.private, crypto.SHA256, hashedData, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
+}
+
+// Verify verifies the `sig` value passed in against `data`.
+func (sv *RSAPSSSignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error {
+ hashedData := hashBeforeSigning(data, sha256.New())
+
+ if err := rsa.VerifyPSS(sv.public, crypto.SHA256, hashedData, sig, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}); err != nil {
+ return ErrSignatureVerificationFailed
+ }
+
+ return nil
+}
+
+// KeyID returns the identifier of the key used to create the
+// RSAPSSSignerVerifier instance.
+func (sv *RSAPSSSignerVerifier) KeyID() (string, error) {
+ return sv.keyID, nil
+}
+
+// Public returns the public portion of the key used to create the
+// RSAPSSSignerVerifier instance.
+func (sv *RSAPSSSignerVerifier) Public() crypto.PublicKey {
+ return sv.public
+}
+
+// LoadRSAPSSKeyFromFile returns an SSLibKey instance for an RSA key stored in a
+// file.
+//
+// Deprecated: use LoadKey(). The custom serialization format has been
+// deprecated. Use
+// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py
+// to convert your key.
+func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) {
+ contents, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load RSA key from file: %w", err)
+ }
+
+ return LoadRSAPSSKeyFromBytes(contents)
+}
+
+// LoadRSAPSSKeyFromBytes returns an SSLibKey instance for the PEM-encoded RSA
+// key in the given byte slice.
+//
+// Deprecated: use LoadKey(); RSA is no longer the only key type that uses
+// PEM serialization.
+func LoadRSAPSSKeyFromBytes(contents []byte) (*SSLibKey, error) {
+ pemData, keyObj, err := decodeAndParsePEM(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load RSA key from file: %w", err)
+ }
+
+ key := &SSLibKey{
+ KeyType: RSAKeyType,
+ Scheme: RSAKeyScheme,
+ KeyIDHashAlgorithms: KeyIDHashAlgorithms,
+ KeyVal: KeyVal{},
+ }
+
+ pubKeyBytes, err := marshalAndGeneratePEM(keyObj)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load RSA key from file: %w", err)
+ }
+ key.KeyVal.Public = strings.TrimSpace(string(pubKeyBytes))
+
+ if _, ok := keyObj.(*rsa.PrivateKey); ok {
+ key.KeyVal.Private = strings.TrimSpace(string(generatePEMBlock(pemData.Bytes, RSAPrivateKeyPEM)))
+ }
+
+ if len(key.KeyID) == 0 {
+ keyID, err := calculateKeyID(key)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load RSA key from file: %w", err)
+ }
+ key.KeyID = keyID
+ }
+
+ return key, nil
+}
+
+func marshalAndGeneratePEM(key interface{}) ([]byte, error) {
+ var pubKeyBytes []byte
+ var err error
+
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ pubKeyBytes, err = x509.MarshalPKIXPublicKey(k)
+ case *rsa.PrivateKey:
+ pubKeyBytes, err = x509.MarshalPKIXPublicKey(k.Public())
+ default:
+ return nil, fmt.Errorf("unexpected key type: %T", k)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return generatePEMBlock(pubKeyBytes, PublicKeyPEM), nil
+}
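
And the analogous RSA-PSS sketch via the (deprecated) PEM loader; a PKCS#1 private-key PEM exercises the path where LoadRSAPSSKeyFromBytes fills in both key halves:

```go
package main

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/signerverifier"
)

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)
	pemPriv := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(priv),
	})

	key, err := signerverifier.LoadRSAPSSKeyFromBytes(pemPriv)
	if err != nil {
		panic(err)
	}

	sv, _ := signerverifier.NewRSAPSSSignerVerifierFromSSLibKey(key)
	sig, _ := sv.Sign(context.Background(), []byte("payload"))
	fmt.Println(sv.Verify(context.Background(), []byte("payload"), sig)) // <nil>
}
```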
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go
new file mode 100644
index 00000000000..3a8259dfda3
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go
@@ -0,0 +1,146 @@
+package signerverifier
+
+import (
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/hex"
+ "errors"
+ "strings"
+)
+
+var KeyIDHashAlgorithms = []string{"sha256", "sha512"}
+
+var (
+ ErrNotPrivateKey = errors.New("loaded key is not a private key")
+ ErrSignatureVerificationFailed = errors.New("failed to verify signature")
+ ErrUnknownKeyType = errors.New("unknown key type")
+ ErrInvalidThreshold = errors.New("threshold is either less than 1 or greater than number of provided public keys")
+ ErrInvalidKey = errors.New("key object has no value")
+ ErrInvalidPEM = errors.New("unable to parse PEM block")
+)
+
+const (
+ PublicKeyPEM = "PUBLIC KEY"
+ PrivateKeyPEM = "PRIVATE KEY"
+)
+
+type SSLibKey struct {
+ KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"`
+ KeyType string `json:"keytype"`
+ KeyVal KeyVal `json:"keyval"`
+ Scheme string `json:"scheme"`
+ KeyID string `json:"keyid"`
+}
+
+type KeyVal struct {
+ Private string `json:"private,omitempty"`
+ Public string `json:"public,omitempty"`
+ Certificate string `json:"certificate,omitempty"`
+ Identity string `json:"identity,omitempty"`
+ Issuer string `json:"issuer,omitempty"`
+}
+
+// LoadKey returns an SSLibKey object when provided a PEM encoded key.
+// Currently, RSA, ED25519, and ECDSA keys are supported.
+func LoadKey(keyBytes []byte) (*SSLibKey, error) {
+ pemBlock, rawKey, err := decodeAndParsePEM(keyBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ var key *SSLibKey
+ switch k := rawKey.(type) {
+ case *rsa.PublicKey:
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(k)
+ if err != nil {
+ return nil, err
+ }
+ key = &SSLibKey{
+ KeyIDHashAlgorithms: KeyIDHashAlgorithms,
+ KeyType: RSAKeyType,
+ KeyVal: KeyVal{
+ Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))),
+ },
+ Scheme: RSAKeyScheme,
+ }
+
+ case *rsa.PrivateKey:
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(k.Public())
+ if err != nil {
+ return nil, err
+ }
+ key = &SSLibKey{
+ KeyIDHashAlgorithms: KeyIDHashAlgorithms,
+ KeyType: RSAKeyType,
+ KeyVal: KeyVal{
+ Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))),
+ Private: strings.TrimSpace(string(generatePEMBlock(pemBlock.Bytes, pemBlock.Type))),
+ },
+ Scheme: RSAKeyScheme,
+ }
+
+ case ed25519.PublicKey:
+ key = &SSLibKey{
+ KeyIDHashAlgorithms: KeyIDHashAlgorithms,
+ KeyType: ED25519KeyType,
+ KeyVal: KeyVal{
+ Public: strings.TrimSpace(hex.EncodeToString(k)),
+ },
+ Scheme: ED25519KeyType,
+ }
+
+ case ed25519.PrivateKey:
+ pubKeyBytes := k.Public()
+ key = &SSLibKey{
+ KeyIDHashAlgorithms: KeyIDHashAlgorithms,
+ KeyType: ED25519KeyType,
+ KeyVal: KeyVal{
+ Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes.(ed25519.PublicKey))),
+ Private: strings.TrimSpace(hex.EncodeToString(k)),
+ },
+ Scheme: ED25519KeyType,
+ }
+
+ case *ecdsa.PublicKey:
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(k)
+ if err != nil {
+ return nil, err
+ }
+ key = &SSLibKey{
+ KeyIDHashAlgorithms: KeyIDHashAlgorithms,
+ KeyType: ECDSAKeyType,
+ KeyVal: KeyVal{
+ Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))),
+ },
+ Scheme: ECDSAKeyScheme,
+ }
+
+ case *ecdsa.PrivateKey:
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(k.Public())
+ if err != nil {
+ return nil, err
+ }
+ key = &SSLibKey{
+ KeyIDHashAlgorithms: KeyIDHashAlgorithms,
+ KeyType: ECDSAKeyType,
+ KeyVal: KeyVal{
+ Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))),
+ Private: strings.TrimSpace(string(generatePEMBlock(pemBlock.Bytes, PrivateKeyPEM))),
+ },
+ Scheme: ECDSAKeyScheme,
+ }
+
+ default:
+ return nil, ErrUnknownKeyType
+ }
+
+ keyID, err := calculateKeyID(key)
+ if err != nil {
+ return nil, err
+ }
+ key.KeyID = keyID
+
+ return key, nil
+}
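
A brief sketch of LoadKey itself: give it any supported PEM key (an ed25519 PKCS#8 key here) and it normalizes the material into an SSLibKey, deriving the key ID from the canonical JSON of the public fields:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/signerverifier"
)

func main() {
	_, priv, _ := ed25519.GenerateKey(rand.Reader)
	der, _ := x509.MarshalPKCS8PrivateKey(priv)
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})

	key, err := signerverifier.LoadKey(pemBytes)
	if err != nil {
		panic(err)
	}

	// ed25519 key material ends up hex-encoded; the key ID is the SHA-256
	// of the canonical JSON encoding of the public fields.
	fmt.Println(key.KeyType, key.Scheme) // ed25519 ed25519
	fmt.Println(len(key.KeyID))          // 64
}
```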
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go
new file mode 100644
index 00000000000..cc0188be3e9
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go
@@ -0,0 +1,142 @@
+package signerverifier
+
+import (
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "hash"
+
+ "github.com/secure-systems-lab/go-securesystemslib/cjson"
+)
+
+/*
+Credits: Parts of this file were originally authored for in-toto-golang.
+*/
+
+var (
+ // ErrNoPEMBlock gets triggered when there is no PEM block in the provided file
+ ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)")
+ // ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails
+ ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type")
+)
+
+// LoadKeyFromSSLibBytes returns a pointer to a Key instance created from the
+// contents of the bytes. The key contents are expected to be in the custom
+// securesystemslib format.
+//
+// Deprecated: use LoadKey(); RSA is no longer the only key type that uses
+// PEM serialization.
+func LoadKeyFromSSLibBytes(contents []byte) (*SSLibKey, error) {
+ var key *SSLibKey
+ if err := json.Unmarshal(contents, &key); err != nil {
+ return LoadRSAPSSKeyFromBytes(contents)
+ }
+ if len(key.KeyID) == 0 {
+ keyID, err := calculateKeyID(key)
+ if err != nil {
+ return nil, err
+ }
+ key.KeyID = keyID
+ }
+
+ return key, nil
+}
+
+func calculateKeyID(k *SSLibKey) (string, error) {
+ key := map[string]any{
+ "keytype": k.KeyType,
+ "scheme": k.Scheme,
+ "keyid_hash_algorithms": k.KeyIDHashAlgorithms,
+ "keyval": map[string]string{
+ "public": k.KeyVal.Public,
+ },
+ }
+ canonical, err := cjson.EncodeCanonical(key)
+ if err != nil {
+ return "", err
+ }
+ digest := sha256.Sum256(canonical)
+ return hex.EncodeToString(digest[:]), nil
+}
+
+/*
+generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType,
+and returns it as a []byte slice. This function should always succeed; if keyBytes
+is empty, the PEM block will have an empty byte body, so only the header and footer
+will exist.
+*/
+func generatePEMBlock(keyBytes []byte, pemType string) []byte {
+ // construct PEM block
+ pemBlock := &pem.Block{
+ Type: pemType,
+ Headers: nil,
+ Bytes: keyBytes,
+ }
+ return pem.EncodeToMemory(pemBlock)
+}
+
+/*
+decodeAndParsePEM receives potential PEM bytes, decodes them via pem.Decode
+and passes them to parsePEMKey. If any error occurs during this process,
+the function will return nil and an error (either ErrFailedPEMParsing
+or ErrNoPEMBlock). On success it will return the decoded pemData, the
+key object interface and nil as error. We need the decoded pemData
+because LoadKey relies on it for operating system interoperability.
+*/
+func decodeAndParsePEM(pemBytes []byte) (*pem.Block, any, error) {
+	// pem.Decode returns the parsed PEM block and a rest.
+	// The rest is everything that could not be parsed as a PEM block,
+	// so we can drop it by using the blank identifier "_".
+ data, _ := pem.Decode(pemBytes)
+ if data == nil {
+ return nil, nil, ErrNoPEMBlock
+ }
+
+ // Try to load private key, if this fails try to load
+ // key as public key
+ key, err := parsePEMKey(data.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ return data, key, nil
+}
+
+/*
+parsePEMKey tries to parse a PEM []byte slice, using the following standards
+in the given order:
+
+ - PKCS8
+ - PKCS1
+ - PKIX
+ - EC (SEC1)
+
+On success it returns the parsed key and nil.
+On failure it returns nil and the error ErrFailedPEMParsing.
+*/
+func parsePEMKey(data []byte) (any, error) {
+ key, err := x509.ParsePKCS8PrivateKey(data)
+ if err == nil {
+ return key, nil
+ }
+ key, err = x509.ParsePKCS1PrivateKey(data)
+ if err == nil {
+ return key, nil
+ }
+ key, err = x509.ParsePKIXPublicKey(data)
+ if err == nil {
+ return key, nil
+ }
+ key, err = x509.ParseECPrivateKey(data)
+ if err == nil {
+ return key, nil
+ }
+ return nil, ErrFailedPEMParsing
+}
+
+func hashBeforeSigning(data []byte, h hash.Hash) []byte {
+ h.Write(data)
+ return h.Sum(nil)
+}
diff --git a/vendor/github.com/shibumi/go-pathspec/.gitignore b/vendor/github.com/shibumi/go-pathspec/.gitignore
new file mode 100644
index 00000000000..3e32393f123
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+# ignore .idea
+.idea
diff --git a/vendor/github.com/shibumi/go-pathspec/GO-LICENSE b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE
new file mode 100644
index 00000000000..74487567632
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/shibumi/go-pathspec/LICENSE b/vendor/github.com/shibumi/go-pathspec/LICENSE
new file mode 100644
index 00000000000..5c304d1a4a7
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/shibumi/go-pathspec/README.md b/vendor/github.com/shibumi/go-pathspec/README.md
new file mode 100644
index 00000000000..c146cf69b01
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/README.md
@@ -0,0 +1,45 @@
+# go-pathspec
+
+[Build](https://github.com/shibumi/go-pathspec/actions?query=workflow%3Abuild) [Coverage](https://coveralls.io/github/shibumi/go-pathspec) [Go Reference](https://pkg.go.dev/github.com/shibumi/go-pathspec)
+
+go-pathspec implements gitignore-style pattern matching for paths.
+
+## Alternatives
+
+There are a few alternatives that try to be gitignore compatible, or even claim
+gitignore compatibility:
+
+### https://github.com/go-git/go-git
+
+go-git claims to be gitignore compatible, but it actually misses a few
+special cases. This issue describes one of the patterns that does not work: https://github.com/go-git/go-git/issues/108
+
+What does not work is global filename pattern matching. Consider the following
+`.gitignore` file:
+
+```gitignore
+# gitignore test file
+parse.go
+```
+
+Then `parse.go` should match all files named `parse.go`, anywhere in the tree.
+You can test this with the following shell script:
+```shell
+mkdir -p /tmp/test/internal/util
+touch /tmp/test/internal/util/parse.go
+cd /tmp/test/
+git init
+echo "parse.go" > .gitignore
+```
+
+With git, `parse.go` will be excluded. The go-git implementation behaves differently.
+
+### https://github.com/monochromegane/go-gitignore
+
+monochromegane's go-gitignore does not support the `**` operator.
+This is also inconsistent with real gitignore behavior.
+
+## Authors
+
+Sander van Harmelen
+Christian Rebischke
diff --git a/vendor/github.com/shibumi/go-pathspec/gitignore.go b/vendor/github.com/shibumi/go-pathspec/gitignore.go
new file mode 100644
index 00000000000..2b08d4e8a57
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/gitignore.go
@@ -0,0 +1,299 @@
+//
+// Copyright 2014, Sander van Harmelen
+// Copyright 2020, Christian Rebischke
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package pathspec implements git compatible gitignore pattern matching.
+// See the description below, if you are unfamiliar with it:
+//
+// A blank line matches no files, so it can serve as a separator for readability.
+//
+// A line starting with # serves as a comment. Put a backslash ("\") in front of
+// the first hash for patterns that begin with a hash.
+//
+// An optional prefix "!" which negates the pattern; any matching file excluded
+// by a previous pattern will become included again. If a negated pattern matches,
+// this will override lower precedence patterns sources. Put a backslash ("\") in
+// front of the first "!" for patterns that begin with a literal "!", for example,
+// "\!important!.txt".
+//
+// If the pattern ends with a slash, it is removed for the purpose of the following
+// description, but it would only find a match with a directory. In other words,
+// foo/ will match a directory foo and paths underneath it, but will not match a
+// regular file or a symbolic link foo (this is consistent with the way how pathspec
+// works in general in Git).
+//
+// If the pattern does not contain a slash /, Git treats it as a shell glob pattern
+// and checks for a match against the pathname relative to the location of the
+// .gitignore file (relative to the toplevel of the work tree if not from a
+// .gitignore file).
+//
+// Otherwise, Git treats the pattern as a shell glob suitable for consumption by
+// fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will not match
+// a / in the pathname. For example, "Documentation/*.html" matches
+// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or/
+// "tools/perf/Documentation/perf.html".
+//
+// A leading slash matches the beginning of the pathname. For example, "/*.c"
+// matches "cat-file.c" but not "mozilla-sha1/sha1.c".
+//
+// Two consecutive asterisks ("**") in patterns matched against full pathname
+// may have special meaning:
+//
+// A leading "**" followed by a slash means match in all directories. For example,
+// "**/foo" matches file or directory "foo" anywhere, the same as pattern "foo".
+// "**/foo/bar" matches file or directory "bar" anywhere that is directly under
+// directory "foo".
+//
+// A trailing "/" matches everything inside. For example, "abc/" matches all files
+// inside directory "abc", relative to the location of the .gitignore file, with
+// infinite depth.
+//
+// A slash followed by two consecutive asterisks then a slash matches zero or more
+// directories. For example, "a/**/b" matches "a/b", "a/x/b", "a/x/y/b" and so on.
+//
+// Other consecutive asterisks are considered invalid.
+package pathspec
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+type gitIgnorePattern struct {
+ Regex string
+ Include bool
+}
+
+// GitIgnore matches a filepath string against a string slice of gitignore
+// patterns. It returns true if the path should be ignored, otherwise false.
+// On error it passes the error through.
+func GitIgnore(patterns []string, name string) (ignore bool, err error) {
+ for _, pattern := range patterns {
+ p := parsePattern(pattern)
+ // Convert Windows paths to Unix paths
+ name = filepath.ToSlash(name)
+ match, err := regexp.MatchString(p.Regex, name)
+ if err != nil {
+ return ignore, err
+ }
+ if match {
+ if p.Include {
+ return false, nil
+ }
+ ignore = true
+ }
+ }
+ return ignore, nil
+}
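
A short sketch of this exported entry point; note how a later negated pattern re-includes a path that an earlier pattern excluded:

```go
package main

import (
	"fmt"

	pathspec "github.com/shibumi/go-pathspec"
)

func main() {
	patterns := []string{"*.exe", "parse.go", "!keep/parse.go"}

	// A bare filename pattern matches at any depth, as in gitignore.
	ignored, _ := pathspec.GitIgnore(patterns, "internal/util/parse.go")

	// The negated pattern wins for this path, so it is not ignored.
	kept, _ := pathspec.GitIgnore(patterns, "keep/parse.go")

	fmt.Println(ignored, kept) // true false
}
```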
+
+// ReadGitIgnore reads gitignore patterns line by line from an io.Reader and
+// matches them against name. It behaves exactly like the GitIgnore function;
+// the only difference is that GitIgnore works on a string slice.
+//
+// ReadGitIgnore returns a boolean indicating whether the path matched, and an error.
+func ReadGitIgnore(content io.Reader, name string) (ignore bool, err error) {
+ scanner := bufio.NewScanner(content)
+
+ for scanner.Scan() {
+ pattern := strings.TrimSpace(scanner.Text())
+ if len(pattern) == 0 || pattern[0] == '#' {
+ continue
+ }
+ p := parsePattern(pattern)
+ // Convert Windows paths to Unix paths
+ name = filepath.ToSlash(name)
+ match, err := regexp.MatchString(p.Regex, name)
+ if err != nil {
+ return ignore, err
+ }
+ if match {
+ if p.Include {
+ return false, scanner.Err()
+ }
+ ignore = true
+ }
+ }
+ return ignore, scanner.Err()
+}
+
+func parsePattern(pattern string) *gitIgnorePattern {
+ p := &gitIgnorePattern{}
+
+ // An optional prefix "!" which negates the pattern; any matching file
+ // excluded by a previous pattern will become included again.
+ if strings.HasPrefix(pattern, "!") {
+ pattern = pattern[1:]
+ p.Include = true
+ } else {
+ p.Include = false
+ }
+
+ // Remove leading back-slash escape for escaped hash ('#') or
+ // exclamation mark ('!').
+ if strings.HasPrefix(pattern, "\\") {
+ pattern = pattern[1:]
+ }
+
+ // Split pattern into segments.
+ patternSegs := strings.Split(pattern, "/")
+
+ // A pattern beginning with a slash ('/') will only match paths
+ // directly on the root directory instead of any descendant paths.
+	// So remove empty first segment to make pattern absolute to root.
+	// A pattern without a beginning slash ('/') will match any
+	// descendant path. This is equivalent to "**/{pattern}". So
+ // prepend with double-asterisks to make pattern relative to
+ // root.
+ if patternSegs[0] == "" {
+ patternSegs = patternSegs[1:]
+ } else if patternSegs[0] != "**" {
+ patternSegs = append([]string{"**"}, patternSegs...)
+ }
+
+ // A pattern ending with a slash ('/') will match all descendant
+	// paths if it is a directory, but not if it is a regular file.
+ // This is equivalent to "{pattern}/**". So, set last segment to
+ // double asterisks to include all descendants.
+ if patternSegs[len(patternSegs)-1] == "" {
+ patternSegs[len(patternSegs)-1] = "**"
+ }
+
+ // Build regular expression from pattern.
+ var expr bytes.Buffer
+ expr.WriteString("^")
+ needSlash := false
+
+ for i, seg := range patternSegs {
+ switch seg {
+ case "**":
+ switch {
+ case i == 0 && i == len(patternSegs)-1:
+ // A pattern consisting solely of double-asterisks ('**')
+ // will match every path.
+ expr.WriteString(".+")
+ case i == 0:
+ // A normalized pattern beginning with double-asterisks
+ // ('**') will match any leading path segments.
+ expr.WriteString("(?:.+/)?")
+ needSlash = false
+ case i == len(patternSegs)-1:
+ // A normalized pattern ending with double-asterisks ('**')
+ // will match any trailing path segments.
+ expr.WriteString("/.+")
+ default:
+ // A pattern with inner double-asterisks ('**') will match
+ // multiple (or zero) inner path segments.
+ expr.WriteString("(?:/.+)?")
+ needSlash = true
+ }
+ case "*":
+ // Match single path segment.
+ if needSlash {
+ expr.WriteString("/")
+ }
+ expr.WriteString("[^/]+")
+ needSlash = true
+ default:
+ // Match segment glob pattern.
+ if needSlash {
+ expr.WriteString("/")
+ }
+ expr.WriteString(translateGlob(seg))
+ needSlash = true
+ }
+ }
+ expr.WriteString("$")
+ p.Regex = expr.String()
+ return p
+}
+
+// NOTE: This is derived from `fnmatch.translate()` and is similar to
+// the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
+func translateGlob(glob string) string {
+ var regex bytes.Buffer
+ escape := false
+
+ for i := 0; i < len(glob); i++ {
+ char := glob[i]
+ // Escape the character.
+ switch {
+ case escape:
+ escape = false
+ regex.WriteString(regexp.QuoteMeta(string(char)))
+ case char == '\\':
+ // Escape character, escape next character.
+ escape = true
+ case char == '*':
+ // Multi-character wildcard. Match any string (except slashes),
+ // including an empty string.
+ regex.WriteString("[^/]*")
+ case char == '?':
+ // Single-character wildcard. Match any single character (except
+ // a slash).
+ regex.WriteString("[^/]")
+ case char == '[':
+ regex.WriteString(translateBracketExpression(&i, glob))
+ default:
+ // Regular character, escape it for regex.
+ regex.WriteString(regexp.QuoteMeta(string(char)))
+ }
+ }
+ return regex.String()
+}
+
+// translateBracketExpression translates a bracket expression
+// wildcard. Except for a leading exclamation mark, the whole bracket
+// expression can be used directly as a regex, but we have to find
+// where the expression ends.
+// - "[][!]" matches ']', '[' and '!'.
+// - "[]-]" matches ']' and '-'.
+// - "[!]a-]" matches any character except ']', 'a' and '-'.
+func translateBracketExpression(i *int, glob string) string {
+ regex := string(glob[*i])
+ *i++
+ j := *i
+
+ // Pass bracket expression negation.
+ if j < len(glob) && glob[j] == '!' {
+ j++
+ }
+ // Pass first closing bracket if it is at the beginning of the
+ // expression.
+ if j < len(glob) && glob[j] == ']' {
+ j++
+ }
+ // Find closing bracket. Stop once we reach the end or find it.
+ for j < len(glob) && glob[j] != ']' {
+ j++
+ }
+
+ if j < len(glob) {
+ // Translate the negation, then append the quoted remainder of
+ // the expression; assigning instead of appending here would
+ // silently drop the '^'.
+ regex = ""
+ if glob[*i] == '!' {
+ regex = "^"
+ *i++
+ }
+ regex += regexp.QuoteMeta(glob[*i:j])
+ *i = j
+ } else {
+ // Failed to find a closing bracket; treat the opening bracket
+ // as a bracket literal instead of as an expression.
+ regex = regexp.QuoteMeta(string(glob[*i]))
+ }
+ return "[" + regex + "]"
+}
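+
+// exampleTranslatedPattern is an editor's illustrative sketch, not
+// part of the vendored library. The pattern "*.log" contains no
+// slash, so the normalization above rewrites it as "**/*.log", and
+// translateGlob compiles the final segment to "[^/]*\.log", giving
+// the expression used below.
+func exampleTranslatedPattern() bool {
+ // "(?:.+/)?" permits any leading directories.
+ re := regexp.MustCompile(`^(?:.+/)?[^/]*\.log$`)
+ return re.MatchString("logs/app.log") // true
+}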
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go
new file mode 100644
index 00000000000..9d43f66c0f2
--- /dev/null
+++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go
@@ -0,0 +1,543 @@
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v6.30.2
+// source: sigstore_bundle.proto
+
+package v1
+
+import (
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ dsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse"
+ v11 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Various timestamped counter signatures over the artifact's signature.
+// Currently only RFC3161 signatures are provided. More formats may be added
+// in the future.
+type TimestampVerificationData struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // A list of RFC3161 signed timestamps provided by the user.
+ // This can be used when the entry has not been stored on a
+ // transparency log, or in conjunction with one for a stronger
+ // trust model.
+ // Clients MUST verify the hashed message in the message imprint
+ // against the signature in the bundle.
+ Rfc3161Timestamps []*v1.RFC3161SignedTimestamp `protobuf:"bytes,1,rep,name=rfc3161_timestamps,json=rfc3161Timestamps,proto3" json:"rfc3161_timestamps,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *TimestampVerificationData) Reset() {
+ *x = TimestampVerificationData{}
+ mi := &file_sigstore_bundle_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimestampVerificationData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimestampVerificationData) ProtoMessage() {}
+
+func (x *TimestampVerificationData) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_bundle_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimestampVerificationData.ProtoReflect.Descriptor instead.
+func (*TimestampVerificationData) Descriptor() ([]byte, []int) {
+ return file_sigstore_bundle_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TimestampVerificationData) GetRfc3161Timestamps() []*v1.RFC3161SignedTimestamp {
+ if x != nil {
+ return x.Rfc3161Timestamps
+ }
+ return nil
+}
+
+// VerificationMaterial captures details on the materials used to verify
+// signatures. This message may be embedded in a DSSE envelope as a signature
+// extension. Specifically, the `ext` field of the extension will expect this
+// message when the signature extension is for Sigstore. This is identified by
+// the `kind` field in the extension, which must be set to
+// application/vnd.dev.sigstore.verificationmaterial;version=0.1 for Sigstore.
+// When used as a DSSE extension, if the `public_key` field is used to indicate
+// the key identifier, it MUST match the `keyid` field of the signature the
+// extension is attached to.
+type VerificationMaterial struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The key material for verification purposes.
+ //
+ // This allows key material to be conveyed in one of three forms:
+ //
+ // 1. An unspecified public key identifier, for retrieving a key
+ // from an out-of-band mechanism (such as a keyring);
+ //
+ // 2. A sequence of one or more X.509 certificates, of which the first member
+ // MUST be a leaf certificate conveying the signing key. Subsequent members
+ // SHOULD be in issuing order, meaning that `n + 1` should be an issuer for `n`.
+ //
+ // Signers MUST NOT include root CA certificates in bundles, and SHOULD NOT
+ // include intermediate CA certificates that appear in an independent root of trust
+ // (such as the Public Good Instance's trusted root).
+ //
+ // Verifiers MUST validate the chain carefully to ensure that it chains up
+ // to a CA certificate that they independently trust. Verifiers SHOULD
+ // handle old or non-complying bundles that have superfluous intermediate and/or
+ // root CA certificates by either ignoring them or explicitly considering them
+ // untrusted for the purposes of chain building.
+ //
+ // 3. A single X.509 certificate, which MUST be a leaf certificate conveying
+ // the signing key.
+ //
+ // When used with the Public Good Instance (PGI) of Sigstore for "keyless" signing
+ // via Fulcio, form (1) MUST NOT be used, regardless of bundle version. Form (1)
+ // MAY be used with the PGI for self-managed keys.
+ //
+ // When used in a `0.1` or `0.2` bundle with the PGI and "keyless" signing,
+ // form (2) MUST be used.
+ //
+ // When used in a `0.3` bundle with the PGI and "keyless" signing,
+ // form (3) MUST be used.
+ //
+ // Types that are valid to be assigned to Content:
+ //
+ // *VerificationMaterial_PublicKey
+ // *VerificationMaterial_X509CertificateChain
+ // *VerificationMaterial_Certificate
+ Content isVerificationMaterial_Content `protobuf_oneof:"content"`
+ // An inclusion proof and an optional signed timestamp from the log.
+ // Client verification libraries MAY provide an option to support v0.1
+ // bundles for backwards compatibility, which may contain an inclusion
+ // promise and not an inclusion proof. In this case, the client MUST
+ // validate the promise.
+ // Verifiers SHOULD NOT allow v0.1 bundles if they're used in an
+ // ecosystem which never produced them.
+ TlogEntries []*v11.TransparencyLogEntry `protobuf:"bytes,3,rep,name=tlog_entries,json=tlogEntries,proto3" json:"tlog_entries,omitempty"`
+ // Timestamp may also come from
+ // tlog_entries.inclusion_promise.signed_entry_timestamp.
+ TimestampVerificationData *TimestampVerificationData `protobuf:"bytes,4,opt,name=timestamp_verification_data,json=timestampVerificationData,proto3" json:"timestamp_verification_data,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *VerificationMaterial) Reset() {
+ *x = VerificationMaterial{}
+ mi := &file_sigstore_bundle_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *VerificationMaterial) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VerificationMaterial) ProtoMessage() {}
+
+func (x *VerificationMaterial) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_bundle_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VerificationMaterial.ProtoReflect.Descriptor instead.
+func (*VerificationMaterial) Descriptor() ([]byte, []int) {
+ return file_sigstore_bundle_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *VerificationMaterial) GetContent() isVerificationMaterial_Content {
+ if x != nil {
+ return x.Content
+ }
+ return nil
+}
+
+func (x *VerificationMaterial) GetPublicKey() *v1.PublicKeyIdentifier {
+ if x != nil {
+ if x, ok := x.Content.(*VerificationMaterial_PublicKey); ok {
+ return x.PublicKey
+ }
+ }
+ return nil
+}
+
+func (x *VerificationMaterial) GetX509CertificateChain() *v1.X509CertificateChain {
+ if x != nil {
+ if x, ok := x.Content.(*VerificationMaterial_X509CertificateChain); ok {
+ return x.X509CertificateChain
+ }
+ }
+ return nil
+}
+
+func (x *VerificationMaterial) GetCertificate() *v1.X509Certificate {
+ if x != nil {
+ if x, ok := x.Content.(*VerificationMaterial_Certificate); ok {
+ return x.Certificate
+ }
+ }
+ return nil
+}
+
+func (x *VerificationMaterial) GetTlogEntries() []*v11.TransparencyLogEntry {
+ if x != nil {
+ return x.TlogEntries
+ }
+ return nil
+}
+
+func (x *VerificationMaterial) GetTimestampVerificationData() *TimestampVerificationData {
+ if x != nil {
+ return x.TimestampVerificationData
+ }
+ return nil
+}
+
+type isVerificationMaterial_Content interface {
+ isVerificationMaterial_Content()
+}
+
+type VerificationMaterial_PublicKey struct {
+ PublicKey *v1.PublicKeyIdentifier `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3,oneof"`
+}
+
+type VerificationMaterial_X509CertificateChain struct {
+ X509CertificateChain *v1.X509CertificateChain `protobuf:"bytes,2,opt,name=x509_certificate_chain,json=x509CertificateChain,proto3,oneof"`
+}
+
+type VerificationMaterial_Certificate struct {
+ Certificate *v1.X509Certificate `protobuf:"bytes,5,opt,name=certificate,proto3,oneof"`
+}
+
+func (*VerificationMaterial_PublicKey) isVerificationMaterial_Content() {}
+
+func (*VerificationMaterial_X509CertificateChain) isVerificationMaterial_Content() {}
+
+func (*VerificationMaterial_Certificate) isVerificationMaterial_Content() {}
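+
+// Editor's sketch, not generated code: selecting the
+// single-certificate form (3) of the Content oneof. The DER-encoded
+// certificate bytes ("der") are assumed to come from the caller.
+//
+//	vm := &VerificationMaterial{
+//		Content: &VerificationMaterial_Certificate{
+//			Certificate: &v1.X509Certificate{RawBytes: der},
+//		},
+//	}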
+
+type Bundle struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // MUST be application/vnd.dev.sigstore.bundle.v0.3+json when
+ // encoded as JSON.
+ // Clients MUST be able to accept media types using the previously
+ // defined formats:
+ // * application/vnd.dev.sigstore.bundle+json;version=0.1
+ // * application/vnd.dev.sigstore.bundle+json;version=0.2
+ // * application/vnd.dev.sigstore.bundle+json;version=0.3
+ MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+ // When a signer is identified by a X.509 certificate, a verifier MUST
+ // verify that the signature was computed at the time the certificate
+ // was valid as described in the Sigstore client spec: "Verification
+ // using a Bundle".
+ //
+ // If the verification material contains a public key identifier
+ // (key hint) and the `content` is a DSSE envelope, the key hints
+ // MUST be exactly the same in the verification material and in the
+ // DSSE envelope.
+ VerificationMaterial *VerificationMaterial `protobuf:"bytes,2,opt,name=verification_material,json=verificationMaterial,proto3" json:"verification_material,omitempty"`
+ // Types that are valid to be assigned to Content:
+ //
+ // *Bundle_MessageSignature
+ // *Bundle_DsseEnvelope
+ Content isBundle_Content `protobuf_oneof:"content"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Bundle) Reset() {
+ *x = Bundle{}
+ mi := &file_sigstore_bundle_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Bundle) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bundle) ProtoMessage() {}
+
+func (x *Bundle) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_bundle_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bundle.ProtoReflect.Descriptor instead.
+func (*Bundle) Descriptor() ([]byte, []int) {
+ return file_sigstore_bundle_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Bundle) GetMediaType() string {
+ if x != nil {
+ return x.MediaType
+ }
+ return ""
+}
+
+func (x *Bundle) GetVerificationMaterial() *VerificationMaterial {
+ if x != nil {
+ return x.VerificationMaterial
+ }
+ return nil
+}
+
+func (x *Bundle) GetContent() isBundle_Content {
+ if x != nil {
+ return x.Content
+ }
+ return nil
+}
+
+func (x *Bundle) GetMessageSignature() *v1.MessageSignature {
+ if x != nil {
+ if x, ok := x.Content.(*Bundle_MessageSignature); ok {
+ return x.MessageSignature
+ }
+ }
+ return nil
+}
+
+func (x *Bundle) GetDsseEnvelope() *dsse.Envelope {
+ if x != nil {
+ if x, ok := x.Content.(*Bundle_DsseEnvelope); ok {
+ return x.DsseEnvelope
+ }
+ }
+ return nil
+}
+
+type isBundle_Content interface {
+ isBundle_Content()
+}
+
+type Bundle_MessageSignature struct {
+ MessageSignature *v1.MessageSignature `protobuf:"bytes,3,opt,name=message_signature,json=messageSignature,proto3,oneof"`
+}
+
+type Bundle_DsseEnvelope struct {
+ // A DSSE envelope can contain arbitrary payloads.
+ // Verifiers must verify that the payload type is a
+ // supported and expected type. This is part of the DSSE
+ // protocol, which is defined here:
+ // https://github.com/secure-systems-lab/dsse/blob/master/protocol.md
+ //
+ // DSSE envelopes in a bundle MUST have exactly one signature.
+ // This is a restriction compared to the DSSE spec, which allows
+ // multiple signatures. There are two primary reasons:
+ // 1. It simplifies the verification logic and policy
+ // 2. The bundle (currently) can only contain a single
+ // instance of the required verification materials
+ //
+ // During verification a client MUST reject an envelope if
+ // the number of signatures is not equal to one.
+ DsseEnvelope *dsse.Envelope `protobuf:"bytes,4,opt,name=dsse_envelope,json=dsseEnvelope,proto3,oneof"`
+}
+
+func (*Bundle_MessageSignature) isBundle_Content() {}
+
+func (*Bundle_DsseEnvelope) isBundle_Content() {}
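+
+// Editor's sketch, not generated code: assembling a v0.3 bundle whose
+// content is a raw message signature. "vm" and "sig" are assumed to
+// be built elsewhere (see VerificationMaterial above).
+//
+//	b := &Bundle{
+//		MediaType:            "application/vnd.dev.sigstore.bundle.v0.3+json",
+//		VerificationMaterial: vm,
+//		Content: &Bundle_MessageSignature{
+//			MessageSignature: &v1.MessageSignature{Signature: sig},
+//		},
+//	}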
+
+var File_sigstore_bundle_proto protoreflect.FileDescriptor
+
+var file_sigstore_bundle_proto_rawDesc = string([]byte{
+ 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72,
+ 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7a, 0x0a,
+ 0x19, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5d, 0x0a, 0x12, 0x72, 0x66,
+ 0x63, 0x33, 0x31, 0x36, 0x31, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+ 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x72, 0x66, 0x63, 0x33, 0x31, 0x36, 0x31, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x14, 0x56, 0x65,
+ 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69,
+ 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+ 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
+ 0x69, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x69, 0x0a, 0x16, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58,
+ 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68,
+ 0x61, 0x69, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x14, 0x78, 0x35, 0x30, 0x39,
+ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
+ 0x12, 0x50, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58,
+ 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69,
+ 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73,
+ 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31,
+ 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x74, 0x6c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x69,
+ 0x65, 0x73, 0x12, 0x71, 0x0a, 0x1b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f,
+ 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69,
+ 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x22, 0xbf, 0x02, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d,
+ 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x76, 0x65,
+ 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72,
+ 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e,
+ 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
+ 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x14, 0x76, 0x65,
+ 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69,
+ 0x61, 0x6c, 0x12, 0x5c, 0x0a, 0x11, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e,
+ 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x10,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x48, 0x00, 0x52, 0x0c, 0x64, 0x73, 0x73, 0x65, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70,
+ 0x65, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05,
+ 0x10, 0x33, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e,
+ 0x76, 0x31, 0x42, 0x0b, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69,
+ 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d,
+ 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f,
+ 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+})
+
+var (
+ file_sigstore_bundle_proto_rawDescOnce sync.Once
+ file_sigstore_bundle_proto_rawDescData []byte
+)
+
+func file_sigstore_bundle_proto_rawDescGZIP() []byte {
+ file_sigstore_bundle_proto_rawDescOnce.Do(func() {
+ file_sigstore_bundle_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_bundle_proto_rawDesc), len(file_sigstore_bundle_proto_rawDesc)))
+ })
+ return file_sigstore_bundle_proto_rawDescData
+}
+
+var file_sigstore_bundle_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_sigstore_bundle_proto_goTypes = []any{
+ (*TimestampVerificationData)(nil), // 0: dev.sigstore.bundle.v1.TimestampVerificationData
+ (*VerificationMaterial)(nil), // 1: dev.sigstore.bundle.v1.VerificationMaterial
+ (*Bundle)(nil), // 2: dev.sigstore.bundle.v1.Bundle
+ (*v1.RFC3161SignedTimestamp)(nil), // 3: dev.sigstore.common.v1.RFC3161SignedTimestamp
+ (*v1.PublicKeyIdentifier)(nil), // 4: dev.sigstore.common.v1.PublicKeyIdentifier
+ (*v1.X509CertificateChain)(nil), // 5: dev.sigstore.common.v1.X509CertificateChain
+ (*v1.X509Certificate)(nil), // 6: dev.sigstore.common.v1.X509Certificate
+ (*v11.TransparencyLogEntry)(nil), // 7: dev.sigstore.rekor.v1.TransparencyLogEntry
+ (*v1.MessageSignature)(nil), // 8: dev.sigstore.common.v1.MessageSignature
+ (*dsse.Envelope)(nil), // 9: io.intoto.Envelope
+}
+var file_sigstore_bundle_proto_depIdxs = []int32{
+ 3, // 0: dev.sigstore.bundle.v1.TimestampVerificationData.rfc3161_timestamps:type_name -> dev.sigstore.common.v1.RFC3161SignedTimestamp
+ 4, // 1: dev.sigstore.bundle.v1.VerificationMaterial.public_key:type_name -> dev.sigstore.common.v1.PublicKeyIdentifier
+ 5, // 2: dev.sigstore.bundle.v1.VerificationMaterial.x509_certificate_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain
+ 6, // 3: dev.sigstore.bundle.v1.VerificationMaterial.certificate:type_name -> dev.sigstore.common.v1.X509Certificate
+ 7, // 4: dev.sigstore.bundle.v1.VerificationMaterial.tlog_entries:type_name -> dev.sigstore.rekor.v1.TransparencyLogEntry
+ 0, // 5: dev.sigstore.bundle.v1.VerificationMaterial.timestamp_verification_data:type_name -> dev.sigstore.bundle.v1.TimestampVerificationData
+ 1, // 6: dev.sigstore.bundle.v1.Bundle.verification_material:type_name -> dev.sigstore.bundle.v1.VerificationMaterial
+ 8, // 7: dev.sigstore.bundle.v1.Bundle.message_signature:type_name -> dev.sigstore.common.v1.MessageSignature
+ 9, // 8: dev.sigstore.bundle.v1.Bundle.dsse_envelope:type_name -> io.intoto.Envelope
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_sigstore_bundle_proto_init() }
+func file_sigstore_bundle_proto_init() {
+ if File_sigstore_bundle_proto != nil {
+ return
+ }
+ file_sigstore_bundle_proto_msgTypes[1].OneofWrappers = []any{
+ (*VerificationMaterial_PublicKey)(nil),
+ (*VerificationMaterial_X509CertificateChain)(nil),
+ (*VerificationMaterial_Certificate)(nil),
+ }
+ file_sigstore_bundle_proto_msgTypes[2].OneofWrappers = []any{
+ (*Bundle_MessageSignature)(nil),
+ (*Bundle_DsseEnvelope)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_bundle_proto_rawDesc), len(file_sigstore_bundle_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_sigstore_bundle_proto_goTypes,
+ DependencyIndexes: file_sigstore_bundle_proto_depIdxs,
+ MessageInfos: file_sigstore_bundle_proto_msgTypes,
+ }.Build()
+ File_sigstore_bundle_proto = out.File
+ file_sigstore_bundle_proto_goTypes = nil
+ file_sigstore_bundle_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go
new file mode 100644
index 00000000000..298e2439ef1
--- /dev/null
+++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go
@@ -0,0 +1,241 @@
+// https://raw.githubusercontent.com/secure-systems-lab/dsse/9c813476bd36de70a5738c72e784f123ecea16af/envelope.proto
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v6.30.2
+// source: envelope.proto
+
+package dsse
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// An authenticated message of arbitrary type.
+type Envelope struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Message to be signed. (In JSON, this is encoded as base64.)
+ // REQUIRED.
+ Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
+ // String unambiguously identifying how to interpret payload.
+ // REQUIRED.
+ PayloadType string `protobuf:"bytes,2,opt,name=payloadType,proto3" json:"payloadType,omitempty"`
+ // Signature over:
+ //
+ // PAE(type, payload)
+ //
+ // Where PAE is defined as:
+ // PAE(type, payload) = "DSSEv1" + SP + LEN(type) + SP + type + SP + LEN(payload) + SP + payload
+ // + = concatenation
+ // SP = ASCII space [0x20]
+ // "DSSEv1" = ASCII [0x44, 0x53, 0x53, 0x45, 0x76, 0x31]
+ // LEN(s) = ASCII decimal encoding of the byte length of s, with no leading zeros
+ // REQUIRED (length >= 1).
+ Signatures []*Signature `protobuf:"bytes,3,rep,name=signatures,proto3" json:"signatures,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
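+
+// Worked example of the PAE encoding documented above (editor's
+// illustration, not generated code): for payloadType
+// "application/vnd.in-toto+json" (28 bytes) and payload "hello"
+// (5 bytes), the byte string that is signed is:
+//
+//	"DSSEv1 28 application/vnd.in-toto+json 5 hello"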
+
+func (x *Envelope) Reset() {
+ *x = Envelope{}
+ mi := &file_envelope_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Envelope) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Envelope) ProtoMessage() {}
+
+func (x *Envelope) ProtoReflect() protoreflect.Message {
+ mi := &file_envelope_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Envelope.ProtoReflect.Descriptor instead.
+func (*Envelope) Descriptor() ([]byte, []int) {
+ return file_envelope_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Envelope) GetPayload() []byte {
+ if x != nil {
+ return x.Payload
+ }
+ return nil
+}
+
+func (x *Envelope) GetPayloadType() string {
+ if x != nil {
+ return x.PayloadType
+ }
+ return ""
+}
+
+func (x *Envelope) GetSignatures() []*Signature {
+ if x != nil {
+ return x.Signatures
+ }
+ return nil
+}
+
+type Signature struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Signature itself. (In JSON, this is encoded as base64.)
+ // REQUIRED.
+ Sig []byte `protobuf:"bytes,1,opt,name=sig,proto3" json:"sig,omitempty"`
+ // *Unauthenticated* hint identifying which public key was used.
+ // OPTIONAL.
+ Keyid string `protobuf:"bytes,2,opt,name=keyid,proto3" json:"keyid,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Signature) Reset() {
+ *x = Signature{}
+ mi := &file_envelope_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Signature) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Signature) ProtoMessage() {}
+
+func (x *Signature) ProtoReflect() protoreflect.Message {
+ mi := &file_envelope_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
+func (*Signature) Descriptor() ([]byte, []int) {
+ return file_envelope_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Signature) GetSig() []byte {
+ if x != nil {
+ return x.Sig
+ }
+ return nil
+}
+
+func (x *Signature) GetKeyid() string {
+ if x != nil {
+ return x.Keyid
+ }
+ return ""
+}
+
+var File_envelope_proto protoreflect.FileDescriptor
+
+var file_envelope_proto_rawDesc = string([]byte{
+ 0x0a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x09, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x08, 0x45,
+ 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+ 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61,
+ 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0a, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x33, 0x0a, 0x09, 0x53, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x67, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x65, 0x79, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x69, 0x64, 0x42, 0x44,
+ 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73,
+ 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x64,
+ 0x73, 0x73, 0x65, 0xea, 0x02, 0x0e, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a,
+ 0x44, 0x53, 0x53, 0x45, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+})
+
+var (
+ file_envelope_proto_rawDescOnce sync.Once
+ file_envelope_proto_rawDescData []byte
+)
+
+func file_envelope_proto_rawDescGZIP() []byte {
+ file_envelope_proto_rawDescOnce.Do(func() {
+ file_envelope_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_envelope_proto_rawDesc), len(file_envelope_proto_rawDesc)))
+ })
+ return file_envelope_proto_rawDescData
+}
+
+var file_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envelope_proto_goTypes = []any{
+ (*Envelope)(nil), // 0: io.intoto.Envelope
+ (*Signature)(nil), // 1: io.intoto.Signature
+}
+var file_envelope_proto_depIdxs = []int32{
+ 1, // 0: io.intoto.Envelope.signatures:type_name -> io.intoto.Signature
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envelope_proto_init() }
+func file_envelope_proto_init() {
+ if File_envelope_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_envelope_proto_rawDesc), len(file_envelope_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envelope_proto_goTypes,
+ DependencyIndexes: file_envelope_proto_depIdxs,
+ MessageInfos: file_envelope_proto_msgTypes,
+ }.Build()
+ File_envelope_proto = out.File
+ file_envelope_proto_goTypes = nil
+ file_envelope_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go
new file mode 100644
index 00000000000..43b3111ebf8
--- /dev/null
+++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go
@@ -0,0 +1,560 @@
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v6.30.2
+// source: sigstore_rekor.proto
+
+package v1
+
+import (
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// KindVersion contains the entry's kind and API version.
+type KindVersion struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Kind is the type of entry being stored in the log.
+ // See here for a list: https://github.com/sigstore/rekor/tree/main/pkg/types
+ Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+ // The specific API version of the type.
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *KindVersion) Reset() {
+ *x = KindVersion{}
+ mi := &file_sigstore_rekor_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *KindVersion) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KindVersion) ProtoMessage() {}
+
+func (x *KindVersion) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_rekor_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KindVersion.ProtoReflect.Descriptor instead.
+func (*KindVersion) Descriptor() ([]byte, []int) {
+ return file_sigstore_rekor_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *KindVersion) GetKind() string {
+ if x != nil {
+ return x.Kind
+ }
+ return ""
+}
+
+func (x *KindVersion) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+// The checkpoint MUST contain an origin string as a unique log identifier,
+ // the tree size, and the root hash. It MAY also be followed by optional data,
+ // and clients MUST NOT assume that any is present. The checkpoint MUST also contain
+// a signature over the root hash (tree head). The checkpoint MAY contain additional
+// signatures, but the first SHOULD be the signature from the log. Checkpoint contents
+// are concatenated with newlines into a single string.
+// The checkpoint format is described in
+// https://github.com/transparency-dev/formats/blob/main/log/README.md
+// and https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md.
+// An example implementation can be found in https://github.com/sigstore/rekor/blob/main/pkg/util/signed_note.go
+type Checkpoint struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Envelope string `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
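+
+// Illustrative envelope value (editor's sketch with placeholder
+// values, not generated code), following the checkpoint format
+// referenced above: origin, decimal tree size, base64 root hash, a
+// blank line, then one signature line per signer:
+//
+//	example.com/log42
+//	13920706
+//	Ot31c5sgZKYhTYk4aJfAS9aBlamcKIowj0aVVhuW69c=
+//
+//	— example.com/log42 wNI9ajBEAiB...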
+
+func (x *Checkpoint) Reset() {
+ *x = Checkpoint{}
+ mi := &file_sigstore_rekor_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Checkpoint) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Checkpoint) ProtoMessage() {}
+
+func (x *Checkpoint) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_rekor_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Checkpoint.ProtoReflect.Descriptor instead.
+func (*Checkpoint) Descriptor() ([]byte, []int) {
+ return file_sigstore_rekor_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Checkpoint) GetEnvelope() string {
+ if x != nil {
+ return x.Envelope
+ }
+ return ""
+}
+
+// InclusionProof is the proof returned from the transparency log. Can
+// be used for offline or online verification against the log.
+type InclusionProof struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The index of the entry in the tree it was written to.
+ LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
+ // The hash digest stored at the root of the merkle tree at the time
+ // the proof was generated.
+ RootHash []byte `protobuf:"bytes,2,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"`
+ // The size of the merkle tree at the time the proof was generated.
+ TreeSize int64 `protobuf:"varint,3,opt,name=tree_size,json=treeSize,proto3" json:"tree_size,omitempty"`
+ // A list of hashes required to compute the inclusion proof, sorted
+ // in order from leaf to root.
+ // Note that leaf and root hashes are not included.
+ // The root hash is available separately in this message, and the
+ // leaf hash should be calculated by the client.
+ Hashes [][]byte `protobuf:"bytes,4,rep,name=hashes,proto3" json:"hashes,omitempty"`
+ // Signature of the tree head, as of the time this proof was
+ // generated. See the 'Checkpoint' comment above for more details.
+ Checkpoint *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
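+
+// Editor's note, not generated code: the proof is checked by
+// recomputing the root hash per RFC 6962, where the leaf hash is
+// SHA-256(0x00 || entry) and an interior node is
+// SHA-256(0x01 || left || right). Starting from the leaf hash at
+// log_index, each element of hashes is folded in, in order, and the
+// result MUST equal root_hash.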
+
+func (x *InclusionProof) Reset() {
+ *x = InclusionProof{}
+ mi := &file_sigstore_rekor_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *InclusionProof) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InclusionProof) ProtoMessage() {}
+
+func (x *InclusionProof) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_rekor_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InclusionProof.ProtoReflect.Descriptor instead.
+func (*InclusionProof) Descriptor() ([]byte, []int) {
+ return file_sigstore_rekor_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *InclusionProof) GetLogIndex() int64 {
+ if x != nil {
+ return x.LogIndex
+ }
+ return 0
+}
+
+func (x *InclusionProof) GetRootHash() []byte {
+ if x != nil {
+ return x.RootHash
+ }
+ return nil
+}
+
+func (x *InclusionProof) GetTreeSize() int64 {
+ if x != nil {
+ return x.TreeSize
+ }
+ return 0
+}
+
+func (x *InclusionProof) GetHashes() [][]byte {
+ if x != nil {
+ return x.Hashes
+ }
+ return nil
+}
+
+func (x *InclusionProof) GetCheckpoint() *Checkpoint {
+ if x != nil {
+ return x.Checkpoint
+ }
+ return nil
+}
+
+// The inclusion promise is calculated by Rekor. It's calculated as a
+// signature over a canonical JSON serialization of the persisted entry, the
+// log ID, log index and the integration timestamp.
+// See https://github.com/sigstore/rekor/blob/a6e58f72b6b18cc06cefe61808efd562b9726330/pkg/api/entries.go#L54
+// The format of the signature depends on the transparency log's public key.
+// If the signature algorithm requires a hash function and/or a signature
+ // scheme (e.g. RSA), those have to be retrieved out-of-band from the log's
+// operators, together with the public key.
+// This is used to verify the integration timestamp's value and that the log
+// has promised to include the entry.
+type InclusionPromise struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ SignedEntryTimestamp []byte `protobuf:"bytes,1,opt,name=signed_entry_timestamp,json=signedEntryTimestamp,proto3" json:"signed_entry_timestamp,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *InclusionPromise) Reset() {
+ *x = InclusionPromise{}
+ mi := &file_sigstore_rekor_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *InclusionPromise) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InclusionPromise) ProtoMessage() {}
+
+func (x *InclusionPromise) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_rekor_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InclusionPromise.ProtoReflect.Descriptor instead.
+func (*InclusionPromise) Descriptor() ([]byte, []int) {
+ return file_sigstore_rekor_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *InclusionPromise) GetSignedEntryTimestamp() []byte {
+ if x != nil {
+ return x.SignedEntryTimestamp
+ }
+ return nil
+}
+
+// TransparencyLogEntry captures all the details required from Rekor to
+// reconstruct an entry, given that the payload is provided via other means.
+// This type can easily be created from the existing response from Rekor.
+// Future iterations could rely on Rekor returning the minimal set of
+// attributes (excluding the payload) that are required for verifying the
+// inclusion promise. The inclusion promise (called SignedEntryTimestamp in
+// the response from Rekor) is similar to a Signed Certificate Timestamp
+// as described here https://www.rfc-editor.org/rfc/rfc6962.html#section-3.2.
+type TransparencyLogEntry struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The global index of the entry, used when querying the log by index.
+ LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
+ // The unique identifier of the log.
+ LogId *v1.LogId `protobuf:"bytes,2,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
+ // The kind (type) and version of the object associated with this
+ // entry. These values are required to construct the entry during
+ // verification.
+ KindVersion *KindVersion `protobuf:"bytes,3,opt,name=kind_version,json=kindVersion,proto3" json:"kind_version,omitempty"`
+ // The UNIX timestamp from the log when the entry was persisted.
+ // The integration time MUST NOT be trusted if inclusion_promise
+ // is omitted.
+ IntegratedTime int64 `protobuf:"varint,4,opt,name=integrated_time,json=integratedTime,proto3" json:"integrated_time,omitempty"`
+ // The inclusion promise/signed entry timestamp from the log.
+ // Required for v0.1 bundles, and MUST be verified.
+ // Optional for >= v0.2 bundles if another suitable source of
+ // time is present (such as another source of signed time,
+ // or the current system time for long-lived certificates).
+ // MUST be verified if no other suitable source of time is present,
+ // and SHOULD be verified otherwise.
+ InclusionPromise *InclusionPromise `protobuf:"bytes,5,opt,name=inclusion_promise,json=inclusionPromise,proto3" json:"inclusion_promise,omitempty"`
+ // The inclusion proof can be used for offline or online verification
+ // that the entry was appended to the log, and that the log has not been
+ // altered.
+ InclusionProof *InclusionProof `protobuf:"bytes,6,opt,name=inclusion_proof,json=inclusionProof,proto3" json:"inclusion_proof,omitempty"`
+ // Optional. The canonicalized transparency log entry, used to
+ // reconstruct the Signed Entry Timestamp (SET) during verification.
+ // The contents of this field are the same as the `body` field in
+ // a Rekor response, meaning that it does **not** include the "full"
+ // canonicalized form; the log index, ID, and related values are
+ // exposed as separate fields. The verifier is responsible for
+ // combining the `canonicalized_body`, `log_index`, `log_id`,
+ // and `integrated_time` into the payload that the SET's signature
+ // is generated over.
+ // This field is intended to be used in cases where the SET cannot be
+ // produced deterministically (e.g. inconsistent JSON field ordering,
+ // differing whitespace, etc.).
+ //
+ // If set, clients MUST verify that the signature referenced in the
+ // `canonicalized_body` matches the signature provided in the
+ // `Bundle.content`.
+ // If not set, clients are responsible for constructing an equivalent
+ // payload from other sources to verify the signature.
+ CanonicalizedBody []byte `protobuf:"bytes,7,opt,name=canonicalized_body,json=canonicalizedBody,proto3" json:"canonicalized_body,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
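+
+// Editor's sketch of the payload reconstruction described above for
+// canonicalized_body (field names follow Rekor's SET convention;
+// values are placeholders):
+//
+//	{"body":"<base64 canonicalized_body>","integratedTime":1234,
+//	 "logID":"<hex log_id>","logIndex":42}
+//
+// The SET signature is then verified over the canonical JSON
+// serialization of this object with the log's public key.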
+
+func (x *TransparencyLogEntry) Reset() {
+ *x = TransparencyLogEntry{}
+ mi := &file_sigstore_rekor_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TransparencyLogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransparencyLogEntry) ProtoMessage() {}
+
+func (x *TransparencyLogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_rekor_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransparencyLogEntry.ProtoReflect.Descriptor instead.
+func (*TransparencyLogEntry) Descriptor() ([]byte, []int) {
+ return file_sigstore_rekor_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *TransparencyLogEntry) GetLogIndex() int64 {
+ if x != nil {
+ return x.LogIndex
+ }
+ return 0
+}
+
+func (x *TransparencyLogEntry) GetLogId() *v1.LogId {
+ if x != nil {
+ return x.LogId
+ }
+ return nil
+}
+
+func (x *TransparencyLogEntry) GetKindVersion() *KindVersion {
+ if x != nil {
+ return x.KindVersion
+ }
+ return nil
+}
+
+func (x *TransparencyLogEntry) GetIntegratedTime() int64 {
+ if x != nil {
+ return x.IntegratedTime
+ }
+ return 0
+}
+
+func (x *TransparencyLogEntry) GetInclusionPromise() *InclusionPromise {
+ if x != nil {
+ return x.InclusionPromise
+ }
+ return nil
+}
+
+func (x *TransparencyLogEntry) GetInclusionProof() *InclusionProof {
+ if x != nil {
+ return x.InclusionProof
+ }
+ return nil
+}
+
+func (x *TransparencyLogEntry) GetCanonicalizedBody() []byte {
+ if x != nil {
+ return x.CanonicalizedBody
+ }
+ return nil
+}
+
+var File_sigstore_rekor_proto protoreflect.FileDescriptor
+
+var file_sigstore_rekor_proto_rawDesc = string([]byte{
+ 0x0a, 0x14, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+ 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15,
+ 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x45, 0x0a, 0x0b, 0x4b, 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1d, 0x0a,
+ 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2d, 0x0a, 0x0a,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x08, 0x65, 0x6e,
+ 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x0e,
+ 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x20,
+ 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78,
+ 0x12, 0x20, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61,
+ 0x73, 0x68, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65,
+ 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x04,
+ 0x20, 0x03, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65,
+ 0x73, 0x12, 0x46, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x10, 0x49, 0x6e, 0x63,
+ 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x39, 0x0a,
+ 0x16, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x14, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xc7, 0x03, 0x0a, 0x14, 0x54, 0x72, 0x61,
+ 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x20, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x49, 0x6e,
+ 0x64, 0x65, 0x78, 0x12, 0x39, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67,
+ 0x49, 0x64, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x4a,
+ 0x0a, 0x0c, 0x6b, 0x69, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x69, 0x6e,
+ 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6b,
+ 0x69, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0f, 0x69, 0x6e,
+ 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72,
+ 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c,
+ 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6c,
+ 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x52, 0x10, 0x69, 0x6e,
+ 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x53,
+ 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6f,
+ 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69,
+ 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e,
+ 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72,
+ 0x6f, 0x6f, 0x66, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c,
+ 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x42, 0x6f,
+ 0x64, 0x79, 0x42, 0x78, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76,
+ 0x31, 0x42, 0x0a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70,
+ 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x72, 0x65,
+ 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72,
+ 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+})
+
+var (
+ file_sigstore_rekor_proto_rawDescOnce sync.Once
+ file_sigstore_rekor_proto_rawDescData []byte
+)
+
+func file_sigstore_rekor_proto_rawDescGZIP() []byte {
+ file_sigstore_rekor_proto_rawDescOnce.Do(func() {
+ file_sigstore_rekor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_rekor_proto_rawDesc), len(file_sigstore_rekor_proto_rawDesc)))
+ })
+ return file_sigstore_rekor_proto_rawDescData
+}
+
+var file_sigstore_rekor_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_sigstore_rekor_proto_goTypes = []any{
+ (*KindVersion)(nil), // 0: dev.sigstore.rekor.v1.KindVersion
+ (*Checkpoint)(nil), // 1: dev.sigstore.rekor.v1.Checkpoint
+ (*InclusionProof)(nil), // 2: dev.sigstore.rekor.v1.InclusionProof
+ (*InclusionPromise)(nil), // 3: dev.sigstore.rekor.v1.InclusionPromise
+ (*TransparencyLogEntry)(nil), // 4: dev.sigstore.rekor.v1.TransparencyLogEntry
+ (*v1.LogId)(nil), // 5: dev.sigstore.common.v1.LogId
+}
+var file_sigstore_rekor_proto_depIdxs = []int32{
+ 1, // 0: dev.sigstore.rekor.v1.InclusionProof.checkpoint:type_name -> dev.sigstore.rekor.v1.Checkpoint
+ 5, // 1: dev.sigstore.rekor.v1.TransparencyLogEntry.log_id:type_name -> dev.sigstore.common.v1.LogId
+ 0, // 2: dev.sigstore.rekor.v1.TransparencyLogEntry.kind_version:type_name -> dev.sigstore.rekor.v1.KindVersion
+ 3, // 3: dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_promise:type_name -> dev.sigstore.rekor.v1.InclusionPromise
+ 2, // 4: dev.sigstore.rekor.v1.TransparencyLogEntry.inclusion_proof:type_name -> dev.sigstore.rekor.v1.InclusionProof
+ 5, // [5:5] is the sub-list for method output_type
+ 5, // [5:5] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_sigstore_rekor_proto_init() }
+func file_sigstore_rekor_proto_init() {
+ if File_sigstore_rekor_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_rekor_proto_rawDesc), len(file_sigstore_rekor_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_sigstore_rekor_proto_goTypes,
+ DependencyIndexes: file_sigstore_rekor_proto_depIdxs,
+ MessageInfos: file_sigstore_rekor_proto_msgTypes,
+ }.Build()
+ File_sigstore_rekor_proto = out.File
+ file_sigstore_rekor_proto_goTypes = nil
+ file_sigstore_rekor_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go
new file mode 100644
index 00000000000..580d1c69f5a
--- /dev/null
+++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1/sigstore_trustroot.pb.go
@@ -0,0 +1,1089 @@
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v6.30.2
+// source: sigstore_trustroot.proto
+
+package v1
+
+import (
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// ServiceSelector specifies how a client SHOULD select a set of
+// Services to connect to. A client SHOULD throw an error if
+// the value is SERVICE_SELECTOR_UNDEFINED.
+type ServiceSelector int32
+
+const (
+ ServiceSelector_SERVICE_SELECTOR_UNDEFINED ServiceSelector = 0
+ // Clients SHOULD select all Services based on supported API version
+ // and validity window.
+ ServiceSelector_ALL ServiceSelector = 1
+ // Clients SHOULD select one Service based on supported API version
+ // and validity window. It is up to the client implementation to
+ // decide how to select the Service, e.g. random or round-robin.
+ ServiceSelector_ANY ServiceSelector = 2
+ // Clients SHOULD select a specific number of Services based on
+ // supported API version and validity window, using the provided
+ // `count`. It is up to the client implementation to decide how to
+ // select the Service, e.g. random or round-robin.
+ ServiceSelector_EXACT ServiceSelector = 3
+)
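+
+// For illustration only: a hedged sketch of applying a selector to a
+// non-empty list of candidate Services that already passed API-version
+// and validity filtering (sel, count and candidates are hypothetical
+// names, not part of this package):
+//
+//	switch sel {
+//	case ServiceSelector_ALL:
+//		return candidates, nil
+//	case ServiceSelector_ANY:
+//		return candidates[:1], nil // random or round-robin also allowed
+//	case ServiceSelector_EXACT:
+//		if count == 0 || int(count) > len(candidates) {
+//			return nil, errors.New("not enough services for EXACT selector")
+//		}
+//		return candidates[:count], nil
+//	}
+//	return nil, errors.New("undefined service selector")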
+
+// Enum value maps for ServiceSelector.
+var (
+ ServiceSelector_name = map[int32]string{
+ 0: "SERVICE_SELECTOR_UNDEFINED",
+ 1: "ALL",
+ 2: "ANY",
+ 3: "EXACT",
+ }
+ ServiceSelector_value = map[string]int32{
+ "SERVICE_SELECTOR_UNDEFINED": 0,
+ "ALL": 1,
+ "ANY": 2,
+ "EXACT": 3,
+ }
+)
+
+func (x ServiceSelector) Enum() *ServiceSelector {
+ p := new(ServiceSelector)
+ *p = x
+ return p
+}
+
+func (x ServiceSelector) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ServiceSelector) Descriptor() protoreflect.EnumDescriptor {
+ return file_sigstore_trustroot_proto_enumTypes[0].Descriptor()
+}
+
+func (ServiceSelector) Type() protoreflect.EnumType {
+ return &file_sigstore_trustroot_proto_enumTypes[0]
+}
+
+func (x ServiceSelector) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ServiceSelector.Descriptor instead.
+func (ServiceSelector) EnumDescriptor() ([]byte, []int) {
+ return file_sigstore_trustroot_proto_rawDescGZIP(), []int{0}
+}
+
+// TransparencyLogInstance describes the immutable parameters from a
+// transparency log.
+// See https://www.rfc-editor.org/rfc/rfc9162.html#name-log-parameters
+// for more details.
+// The included parameters are the minimal set required to identify a log,
+// and verify an inclusion proof/promise.
+type TransparencyLogInstance struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The base URL of the log, which can be used to construct URLs for the client.
+ // SHOULD match the origin on the log checkpoint:
+ // https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md#note-text.
+ BaseUrl string `protobuf:"bytes,1,opt,name=base_url,json=baseUrl,proto3" json:"base_url,omitempty"`
+ // The hash algorithm used for the Merkle Tree.
+ HashAlgorithm v1.HashAlgorithm `protobuf:"varint,2,opt,name=hash_algorithm,json=hashAlgorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"hash_algorithm,omitempty"`
+ // The public key used to verify signatures generated by the log.
+ // This attribute contains the signature algorithm used by the log.
+ PublicKey *v1.PublicKey `protobuf:"bytes,3,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
+ // The identifier for this transparency log.
+ // Represented as the SHA-256 hash of the log's public key,
+ // calculated over the DER encoding of the key represented as
+ // SubjectPublicKeyInfo.
+ // See https://www.rfc-editor.org/rfc/rfc6962#section-3.2
+ // For Rekor v2 instances, log_id and checkpoint_key_id will be set
+ // to the same value.
+ // It is recommended to use checkpoint_key_id instead, since log_id is not
+ // guaranteed to be unique across multiple deployments. Clients
+ // must use the key name and key ID, as defined by the signed-note spec
+ // linked below, from a checkpoint to determine the correct
+ // TransparencyLogInstance to verify a proof.
+ // log_id will eventually be deprecated in favor of checkpoint_key_id.
+ LogId *v1.LogId `protobuf:"bytes,4,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"`
+ // The unique identifier for the log, used in the checkpoint.
+ // Only supported for TrustedRoot media types matching or greater than
+ // application/vnd.dev.sigstore.trustedroot.v0.2+json
+ // Its calculation is described in
+ // https://github.com/C2SP/C2SP/blob/main/signed-note.md#signatures
+ // SHOULD be set for all logs. When not set, clients MUST use log_id.
+ //
+ // For Ed25519 signatures, the key ID is computed per the C2SP spec:
+ // key ID = SHA-256(key name || 0x0A || 0x01 || 32-byte Ed25519 public key)[:4]
+ // For ECDSA signatures, the key ID is computed per the C2SP spec:
+ // key ID = SHA-256(PKIX ASN.1 DER-encoded public key, in SubjectPublicKeyInfo format)[:4]
+ // For RSA signatures, the signature type will be 0xff with an appended identifier for the format,
+ // "PKIX-RSA-PKCS#1v1.5":
+ // key ID = SHA-256(key name || 0x0A || 0xff || PKIX-RSA-PKCS#1v1.5 || PKIX ASN.1 DER-encoded public key)[:4]
+ //
+ // This is provided for convenience. Clients can also calculate the
+ // checkpoint key ID given the log's public key.
+ // SHOULD be 4 bytes long, as a truncated hash.
+ //
+ // To find a matching TransparencyLogInstance in the TrustedRoot,
+ // clients will parse the checkpoint, and for each signature line,
+ // use the key name (i.e. log origin, base_url from TrustedRoot)
+ // and checkpoint key ID (i.e. checkpoint_key_id from TrustedRoot)
+ // which can then be compared against the TrustedRoot log instances.
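+ //
+ // As a minimal illustrative sketch (keyName and pub are hypothetical
+ // values, not part of this package), the Ed25519 computation above
+ // could be written as:
+ //
+ //	sum := sha256.Sum256(append(append([]byte(keyName), 0x0A, 0x01), pub...))
+ //	keyID := sum[:4] // truncate to 4 bytes, per the C2SP signed-note spec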
+ CheckpointKeyId *v1.LogId `protobuf:"bytes,5,opt,name=checkpoint_key_id,json=checkpointKeyId,proto3" json:"checkpoint_key_id,omitempty"`
+ // The name of the operator of this log deployment. Operator MUST be
+ // formatted as a scheme-less URI, e.g. sigstore.dev
+ // Only supported for TrustedRoot media types matching or greater than
+ // application/vnd.dev.sigstore.trustedroot.v0.2+json
+ // This MUST be used when there are multiple transparency log instances
+ // to determine if log proof verification meets a specified threshold,
+ // e.g. two proofs from log deployments operated by the same operator
+ // should count as only one valid proof.
+ Operator string `protobuf:"bytes,6,opt,name=operator,proto3" json:"operator,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *TransparencyLogInstance) Reset() {
+ *x = TransparencyLogInstance{}
+ mi := &file_sigstore_trustroot_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TransparencyLogInstance) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransparencyLogInstance) ProtoMessage() {}
+
+func (x *TransparencyLogInstance) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_trustroot_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransparencyLogInstance.ProtoReflect.Descriptor instead.
+func (*TransparencyLogInstance) Descriptor() ([]byte, []int) {
+ return file_sigstore_trustroot_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TransparencyLogInstance) GetBaseUrl() string {
+ if x != nil {
+ return x.BaseUrl
+ }
+ return ""
+}
+
+func (x *TransparencyLogInstance) GetHashAlgorithm() v1.HashAlgorithm {
+ if x != nil {
+ return x.HashAlgorithm
+ }
+ return v1.HashAlgorithm(0)
+}
+
+func (x *TransparencyLogInstance) GetPublicKey() *v1.PublicKey {
+ if x != nil {
+ return x.PublicKey
+ }
+ return nil
+}
+
+func (x *TransparencyLogInstance) GetLogId() *v1.LogId {
+ if x != nil {
+ return x.LogId
+ }
+ return nil
+}
+
+func (x *TransparencyLogInstance) GetCheckpointKeyId() *v1.LogId {
+ if x != nil {
+ return x.CheckpointKeyId
+ }
+ return nil
+}
+
+func (x *TransparencyLogInstance) GetOperator() string {
+ if x != nil {
+ return x.Operator
+ }
+ return ""
+}
+
+// CertificateAuthority enlists the information required to identify which
+// CA to use and perform signature verification.
+type CertificateAuthority struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The root certificate MUST be self-signed, and so the subject and
+ // issuer are the same.
+ Subject *v1.DistinguishedName `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"`
+ // The URI identifies the certificate authority.
+ //
+ // It is RECOMMENDED that the URI is the base URL for the certificate
+ // authority, that can be provided to any SDK/client provided
+ // by the certificate authority to interact with the certificate
+ // authority.
+ Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"`
+ // The certificate chain for this CA. The last certificate in the chain
+ // MUST be the trust anchor. The trust anchor MAY be a self-signed root
+ // CA certificate or MAY be an intermediate CA certificate.
+ CertChain *v1.X509CertificateChain `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"`
+ // The time the *entire* chain was valid. This is at most the
+ // longest interval during which *all* certificates in the chain
+ // were valid, but it MAY be shorter. Clients MUST check timestamps against *both*
+ // the `valid_for` time range *and* the entire certificate chain.
+ //
+ // The TimeRange should be considered valid *inclusive* of the
+ // endpoints.
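+ //
+ // A hedged sketch of the inclusive endpoint check (t, start and end
+ // are hypothetical time.Time values):
+ //
+ //	ok := !t.Before(start) && !t.After(end)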
+ ValidFor *v1.TimeRange `protobuf:"bytes,4,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"`
+ // The name of the operator of this certificate or timestamp authority.
+ // Operator MUST be formatted as a scheme-less URI, e.g. sigstore.dev
+ // This MUST be used when there are multiple timestamp authorities to
+ // determine if the signed timestamp verification meets a specified
+ // threshold, e.g. two signed timestamps from timestamp authorities
+ // operated by the same operator should count as only one valid
+ // timestamp.
+ Operator string `protobuf:"bytes,5,opt,name=operator,proto3" json:"operator,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *CertificateAuthority) Reset() {
+ *x = CertificateAuthority{}
+ mi := &file_sigstore_trustroot_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CertificateAuthority) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CertificateAuthority) ProtoMessage() {}
+
+func (x *CertificateAuthority) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_trustroot_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CertificateAuthority.ProtoReflect.Descriptor instead.
+func (*CertificateAuthority) Descriptor() ([]byte, []int) {
+ return file_sigstore_trustroot_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CertificateAuthority) GetSubject() *v1.DistinguishedName {
+ if x != nil {
+ return x.Subject
+ }
+ return nil
+}
+
+func (x *CertificateAuthority) GetUri() string {
+ if x != nil {
+ return x.Uri
+ }
+ return ""
+}
+
+func (x *CertificateAuthority) GetCertChain() *v1.X509CertificateChain {
+ if x != nil {
+ return x.CertChain
+ }
+ return nil
+}
+
+func (x *CertificateAuthority) GetValidFor() *v1.TimeRange {
+ if x != nil {
+ return x.ValidFor
+ }
+ return nil
+}
+
+func (x *CertificateAuthority) GetOperator() string {
+ if x != nil {
+ return x.Operator
+ }
+ return ""
+}
+
+// TrustedRoot describes the client's complete set of trusted entities.
+// How the TrustedRoot is populated is not specified, but can be a
+// combination of many sources such as TUF repositories, files on disk etc.
+//
+// The TrustedRoot is not meant to be used for any artifact verification, only
+// to capture the complete/global set of trusted verification materials.
+// When verifying an artifact, based on the artifact and policies, a selection
+// of keys/authorities is expected to be extracted and provided to the
+// verification function. This way the set of keys/authorities can be kept to
+// a minimal set by the policy, giving better control over which signatures
+// are allowed.
+//
+// The embedded transparency logs, CT logs, CAs and TSAs MUST include any
+// previously used instance -- otherwise signatures made in the past cannot
+// be verified.
+//
+// All the listed instances SHOULD be sorted by 'valid_for' in ascending
+// order, that is, the oldest instance first. Only the last instance is
+// allowed to have its 'end' timestamp unset. All previous instances MUST
+// have a closed interval of validity. The last instance MAY have a closed
+// interval. Clients MUST accept instances that overlap in time; otherwise
+// clients may experience problems during rotations of verification
+// materials.
+//
+// To be able to manage planned rotations of either transparency logs or
+// certificate authorities, clients MUST accept lists of instances where
+// the last instance has a 'valid_for' that lies in the future.
+// This should not be a problem, as clients SHOULD first search the trust
+// root for a suitable instance before creating a per-artifact trust root
+// (that is, a subset of the complete trust root) that is used for
+// verification.
+type TrustedRoot struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // MUST be application/vnd.dev.sigstore.trustedroot.v0.2+json
+ // when encoded as JSON.
+ // Clients MAY choose to also support
+ // application/vnd.dev.sigstore.trustedroot.v0.1+json
+ // Clients MAY process and parse content with the media type defined
+ // in the old format:
+ // application/vnd.dev.sigstore.trustedroot+json;version=0.1
+ MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+ // A set of trusted Rekor servers.
+ Tlogs []*TransparencyLogInstance `protobuf:"bytes,2,rep,name=tlogs,proto3" json:"tlogs,omitempty"`
+ // A set of trusted certificate authorities (e.g. Fulcio), and any
+ // intermediate certificates they provide.
+ // If a CA issues multiple intermediate certificates, each
+ // combination shall be represented as a separate chain; i.e., a single
+ // root cert may appear in multiple chains but with different
+ // intermediate and/or leaf certificates.
+ // The certificates are intended to be used for verifying artifact
+ // signatures.
+ CertificateAuthorities []*CertificateAuthority `protobuf:"bytes,3,rep,name=certificate_authorities,json=certificateAuthorities,proto3" json:"certificate_authorities,omitempty"`
+ // A set of trusted certificate transparency logs.
+ Ctlogs []*TransparencyLogInstance `protobuf:"bytes,4,rep,name=ctlogs,proto3" json:"ctlogs,omitempty"`
+ // A set of trusted timestamping authorities.
+ TimestampAuthorities []*CertificateAuthority `protobuf:"bytes,5,rep,name=timestamp_authorities,json=timestampAuthorities,proto3" json:"timestamp_authorities,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *TrustedRoot) Reset() {
+ *x = TrustedRoot{}
+ mi := &file_sigstore_trustroot_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TrustedRoot) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TrustedRoot) ProtoMessage() {}
+
+func (x *TrustedRoot) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_trustroot_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TrustedRoot.ProtoReflect.Descriptor instead.
+func (*TrustedRoot) Descriptor() ([]byte, []int) {
+ return file_sigstore_trustroot_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *TrustedRoot) GetMediaType() string {
+ if x != nil {
+ return x.MediaType
+ }
+ return ""
+}
+
+func (x *TrustedRoot) GetTlogs() []*TransparencyLogInstance {
+ if x != nil {
+ return x.Tlogs
+ }
+ return nil
+}
+
+func (x *TrustedRoot) GetCertificateAuthorities() []*CertificateAuthority {
+ if x != nil {
+ return x.CertificateAuthorities
+ }
+ return nil
+}
+
+func (x *TrustedRoot) GetCtlogs() []*TransparencyLogInstance {
+ if x != nil {
+ return x.Ctlogs
+ }
+ return nil
+}
+
+func (x *TrustedRoot) GetTimestampAuthorities() []*CertificateAuthority {
+ if x != nil {
+ return x.TimestampAuthorities
+ }
+ return nil
+}
+
+// SigningConfig represents the trusted entities/state needed by Sigstore
+// signing. In particular, it primarily contains service URLs that a Sigstore
+// signer may need to connect to for the online aspects of signing.
+type SigningConfig struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // MUST be application/vnd.dev.sigstore.signingconfig.v0.2+json
+ // Clients MAY choose to also support
+ // application/vnd.dev.sigstore.signingconfig.v0.1+json
+ MediaType string `protobuf:"bytes,5,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+ // URLs to Fulcio-compatible CAs, capable of receiving
+ // Certificate Signing Requests (CSRs) and responding with
+ // issued certificates.
+ //
+ // These URLs MUST be the "base" URLs for the CAs, which clients
+ // should construct an appropriate CSR endpoint on top of.
+ // For example, if a CA URL is `https://example.com/ca`, then
+ // the client MAY construct the CSR endpoint as
+ // `https://example.com/ca/api/v2/signingCert`.
+ //
+ // Clients MUST select only one Service with the highest API version
+ // that the client is compatible with, that is within its
+ // validity period, and has the newest validity start date.
+ // Clients SHOULD select the first Service that meets this requirement.
+ // All listed Services SHOULD be sorted by the `valid_for` window in
+ // descending order, with the newest instance first.
+ CaUrls []*Service `protobuf:"bytes,6,rep,name=ca_urls,json=caUrls,proto3" json:"ca_urls,omitempty"`
+ // URLs to OpenID Connect identity providers.
+ //
+ // These URLs MUST be the "base" URLs for the OIDC IdPs, which clients
+ // should perform well-known OpenID Connect discovery against.
+ //
+ // Clients MUST select only one Service with the highest API version
+ // that the client is compatible with, that is within its
+ // validity period, and has the newest validity start date.
+ // Clients SHOULD select the first Service that meets this requirement.
+ // All listed Services SHOULD be sorted by the `valid_for` window in
+ // descending order, with the newest instance first.
+ OidcUrls []*Service `protobuf:"bytes,7,rep,name=oidc_urls,json=oidcUrls,proto3" json:"oidc_urls,omitempty"`
+ // URLs to Rekor transparency logs.
+ //
+ // These URLs MUST be the "base" URLs for the transparency logs,
+ // which clients should construct appropriate API endpoints on top of.
+ //
+ // Clients MUST group Services by `operator` and select at most one
+ // Service from each operator. Clients MUST select Services with the
+ // highest API version that the client is compatible with, that are
+ // within its validity period, and have the newest validity start dates.
+ // All listed Services SHOULD be sorted by the `valid_for` window in
+ // descending order, with the newest instance first.
+ //
+ // Clients MUST select Services based on the selector value of
+ // `rekor_tlog_config`.
+ RekorTlogUrls []*Service `protobuf:"bytes,8,rep,name=rekor_tlog_urls,json=rekorTlogUrls,proto3" json:"rekor_tlog_urls,omitempty"`
+ // Specifies how a client should select the set of Rekor transparency
+ // logs to write to.
+ RekorTlogConfig *ServiceConfiguration `protobuf:"bytes,9,opt,name=rekor_tlog_config,json=rekorTlogConfig,proto3" json:"rekor_tlog_config,omitempty"`
+ // URLs to RFC 3161 Time Stamping Authorities (TSA).
+ //
+ // These URLs MUST be the *full* URLs for the TSAs, meaning that each
+ // is suitable, as-is, for submitting Time Stamp Requests (TSRs) over
+ // HTTP, per RFC 3161.
+ //
+ // Clients MUST group Services by `operator` and select at most one
+ // Service from each operator. Clients MUST select Services with the
+ // highest API version that the client is compatible with, that are
+ // within its validity period, and have the newest validity start dates.
+ // All listed Services SHOULD be sorted by the `valid_for` window in
+ // descending order, with the newest instance first.
+ //
+ // Clients MUST select Services based on the selector value of
+ // `tsa_config`.
+ TsaUrls []*Service `protobuf:"bytes,10,rep,name=tsa_urls,json=tsaUrls,proto3" json:"tsa_urls,omitempty"`
+ // Specifies how a client should select the set of TSAs to request
+ // signed timestamps from.
+ TsaConfig *ServiceConfiguration `protobuf:"bytes,11,opt,name=tsa_config,json=tsaConfig,proto3" json:"tsa_config,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SigningConfig) Reset() {
+ *x = SigningConfig{}
+ mi := &file_sigstore_trustroot_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SigningConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SigningConfig) ProtoMessage() {}
+
+func (x *SigningConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_trustroot_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SigningConfig.ProtoReflect.Descriptor instead.
+func (*SigningConfig) Descriptor() ([]byte, []int) {
+ return file_sigstore_trustroot_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SigningConfig) GetMediaType() string {
+ if x != nil {
+ return x.MediaType
+ }
+ return ""
+}
+
+func (x *SigningConfig) GetCaUrls() []*Service {
+ if x != nil {
+ return x.CaUrls
+ }
+ return nil
+}
+
+func (x *SigningConfig) GetOidcUrls() []*Service {
+ if x != nil {
+ return x.OidcUrls
+ }
+ return nil
+}
+
+func (x *SigningConfig) GetRekorTlogUrls() []*Service {
+ if x != nil {
+ return x.RekorTlogUrls
+ }
+ return nil
+}
+
+func (x *SigningConfig) GetRekorTlogConfig() *ServiceConfiguration {
+ if x != nil {
+ return x.RekorTlogConfig
+ }
+ return nil
+}
+
+func (x *SigningConfig) GetTsaUrls() []*Service {
+ if x != nil {
+ return x.TsaUrls
+ }
+ return nil
+}
+
+func (x *SigningConfig) GetTsaConfig() *ServiceConfiguration {
+ if x != nil {
+ return x.TsaConfig
+ }
+ return nil
+}
+
+// Service represents an instance of a service that is a part of Sigstore infrastructure.
+// When selecting one or multiple services from a list of services, clients MUST:
+// - Use the API version hint to determine the service with the highest API version
+// that the client is compatible with.
+// - Only select services within the specified validity period and that have the
+// newest validity start date.
+//
+// When selecting multiple services, clients MUST:
+// - Use the ServiceConfiguration to determine how many services MUST be selected.
+// Clients MUST return an error if there are not enough services that meet the
+// selection criteria.
+// - Group services by `operator` and select at most one service from an operator.
+// During verification, clients MUST treat valid verification metadata from the
+// operator as valid only once towards a threshold.
+// - Select services from only the highest supported API version.
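+//
+// As a minimal sketch of the per-operator rule above, keeping at most one
+// Service per operator (highestSupported, isValid and startsAfter are
+// hypothetical helpers, not part of this package):
+//
+//	byOperator := map[string]*Service{}
+//	for _, s := range services {
+//		if s.GetMajorApiVersion() != highestSupported || !isValid(s) {
+//			continue
+//		}
+//		// Prefer the instance with the newest validity start date.
+//		if cur, ok := byOperator[s.GetOperator()]; !ok || startsAfter(s, cur) {
+//			byOperator[s.GetOperator()] = s
+//		}
+//	}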
+type Service struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // URL of the service. MUST include scheme and authority. MAY include path.
+ Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
+ // Specifies the major API version. A value of 0 represents a service that
+ // has not yet been released.
+ MajorApiVersion uint32 `protobuf:"varint,2,opt,name=major_api_version,json=majorApiVersion,proto3" json:"major_api_version,omitempty"`
+ // Validity period of a service. A service that has only a start date
+ // SHOULD be considered the most recent instance of that service, but
+ // the client MUST NOT assume there is only one valid instance.
+ // The TimeRange MUST be considered valid *inclusive* of the
+ // endpoints.
+ ValidFor *v1.TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"`
+ // Specifies the name of the service operator. When selecting multiple
+ // services, clients MUST use the operator to select services from
+ // distinct operators. Operator MUST be formatted as a scheme-less
+ // URI, e.g. sigstore.dev
+ Operator string `protobuf:"bytes,4,opt,name=operator,proto3" json:"operator,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Service) Reset() {
+ *x = Service{}
+ mi := &file_sigstore_trustroot_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service) ProtoMessage() {}
+
+func (x *Service) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_trustroot_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service.ProtoReflect.Descriptor instead.
+func (*Service) Descriptor() ([]byte, []int) {
+ return file_sigstore_trustroot_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Service) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *Service) GetMajorApiVersion() uint32 {
+ if x != nil {
+ return x.MajorApiVersion
+ }
+ return 0
+}
+
+func (x *Service) GetValidFor() *v1.TimeRange {
+ if x != nil {
+ return x.ValidFor
+ }
+ return nil
+}
+
+func (x *Service) GetOperator() string {
+ if x != nil {
+ return x.Operator
+ }
+ return ""
+}
+
+// ServiceConfiguration specifies how a client should select a set of
+// Services to connect to, along with a count when a specific number
+// of Services is requested.
+type ServiceConfiguration struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // How a client should select a set of Services to connect to.
+ // Clients SHOULD NOT select services from multiple API versions.
+ Selector ServiceSelector `protobuf:"varint,1,opt,name=selector,proto3,enum=dev.sigstore.trustroot.v1.ServiceSelector" json:"selector,omitempty"`
+ // count specifies the number of Services the client should use.
+ // Only used when selector is set to EXACT, and count MUST be greater
+ // than 0. count MUST be less than or equal to the number of Services.
+ // Clients MUST return an error if there are not enough services
+ // that meet the selection criteria.
+ Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ServiceConfiguration) Reset() {
+ *x = ServiceConfiguration{}
+ mi := &file_sigstore_trustroot_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceConfiguration) ProtoMessage() {}
+
+func (x *ServiceConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_trustroot_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceConfiguration.ProtoReflect.Descriptor instead.
+func (*ServiceConfiguration) Descriptor() ([]byte, []int) {
+ return file_sigstore_trustroot_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ServiceConfiguration) GetSelector() ServiceSelector {
+ if x != nil {
+ return x.Selector
+ }
+ return ServiceSelector_SERVICE_SELECTOR_UNDEFINED
+}
+
+func (x *ServiceConfiguration) GetCount() uint32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+// ClientTrustConfig describes the complete state needed by a client
+// to perform both signing and verification operations against a particular
+// instance of Sigstore.
+type ClientTrustConfig struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // MUST be application/vnd.dev.sigstore.clienttrustconfig.v0.1+json
+ MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+ // The root of trust, which MUST be present.
+ TrustedRoot *TrustedRoot `protobuf:"bytes,2,opt,name=trusted_root,json=trustedRoot,proto3" json:"trusted_root,omitempty"`
+ // Configuration for signing clients, which MUST be present.
+ SigningConfig *SigningConfig `protobuf:"bytes,3,opt,name=signing_config,json=signingConfig,proto3" json:"signing_config,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ClientTrustConfig) Reset() {
+ *x = ClientTrustConfig{}
+ mi := &file_sigstore_trustroot_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ClientTrustConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientTrustConfig) ProtoMessage() {}
+
+func (x *ClientTrustConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_sigstore_trustroot_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientTrustConfig.ProtoReflect.Descriptor instead.
+func (*ClientTrustConfig) Descriptor() ([]byte, []int) {
+ return file_sigstore_trustroot_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ClientTrustConfig) GetMediaType() string {
+ if x != nil {
+ return x.MediaType
+ }
+ return ""
+}
+
+func (x *ClientTrustConfig) GetTrustedRoot() *TrustedRoot {
+ if x != nil {
+ return x.TrustedRoot
+ }
+ return nil
+}
+
+func (x *ClientTrustConfig) GetSigningConfig() *SigningConfig {
+ if x != nil {
+ return x.SigningConfig
+ }
+ return nil
+}
+
+var File_sigstore_trustroot_proto protoreflect.FileDescriptor
+
+var file_sigstore_trustroot_proto_rawDesc = string([]byte{
+ 0x0a, 0x18, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74,
+ 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x64, 0x65, 0x76, 0x2e,
+ 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f,
+ 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65,
+ 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe1, 0x02,
+ 0x0a, 0x17, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f,
+ 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x61, 0x73,
+ 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x61, 0x73,
+ 0x65, 0x55, 0x72, 0x6c, 0x12, 0x4c, 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x61, 0x6c, 0x67,
+ 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64,
+ 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69,
+ 0x74, 0x68, 0x6d, 0x52, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74,
+ 0x68, 0x6d, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+ 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69,
+ 0x63, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f,
+ 0x67, 0x49, 0x64, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
+ 0x6f, 0x67, 0x49, 0x64, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f,
+ 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f,
+ 0x72, 0x22, 0x96, 0x02, 0x0a, 0x14, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x07, 0x73, 0x75,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x64, 0x65,
+ 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68,
+ 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
+ 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72,
+ 0x69, 0x12, 0x4b, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58,
+ 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68,
+ 0x61, 0x69, 0x6e, 0x52, 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x3e,
+ 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x12, 0x1a,
+ 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x92, 0x03, 0x0a, 0x0b, 0x54,
+ 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65,
+ 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x05, 0x74, 0x6c, 0x6f,
+ 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73,
+ 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f,
+ 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63,
+ 0x79, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x74, 0x6c,
+ 0x6f, 0x67, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31,
+ 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x4a, 0x0a,
+ 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e,
+ 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75,
+ 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x52, 0x06, 0x63, 0x74, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x64, 0x0a, 0x15, 0x74, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69,
+ 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73,
+ 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f,
+ 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22,
+ 0xea, 0x03, 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x3b, 0x0a, 0x07, 0x63, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65,
+ 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x63, 0x61, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x3f, 0x0a,
+ 0x09, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e,
+ 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x6f, 0x69, 0x64, 0x63, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x4a,
+ 0x0a, 0x0f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x5f, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x75, 0x72, 0x6c,
+ 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69,
+ 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x6b,
+ 0x6f, 0x72, 0x54, 0x6c, 0x6f, 0x67, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x5b, 0x0a, 0x11, 0x72, 0x65,
+ 0x6b, 0x6f, 0x72, 0x5f, 0x74, 0x6c, 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
+ 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x54, 0x6c, 0x6f,
+ 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x73, 0x61, 0x5f, 0x75,
+ 0x72, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e,
+ 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f,
+ 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x74,
+ 0x73, 0x61, 0x55, 0x72, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x74, 0x73, 0x61, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76,
+ 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72,
+ 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, 0x73, 0x61,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x05, 0x22, 0xa3, 0x01, 0x0a,
+ 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x61,
+ 0x6a, 0x6f, 0x72, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x41, 0x70, 0x69, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f,
+ 0x66, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x65, 0x76, 0x2e,
+ 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x6f, 0x72, 0x22, 0x74, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x08, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x64,
+ 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73,
+ 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd8, 0x01, 0x0a, 0x11, 0x43, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4e, 0x0a,
+ 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x2e,
+ 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x54, 0x0a,
+ 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2a, 0x4e, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43,
+ 0x45, 0x5f, 0x53, 0x45, 0x4c, 0x45, 0x43, 0x54, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46,
+ 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12,
+ 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x58, 0x41, 0x43,
+ 0x54, 0x10, 0x03, 0x42, 0x88, 0x01, 0x0a, 0x1f, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x75, 0x73, 0x74,
+ 0x72, 0x6f, 0x6f, 0x74, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x6f,
+ 0x6f, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65,
+ 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x72, 0x6f, 0x6f,
+ 0x74, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x17, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a,
+ 0x3a, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+})
+
+var (
+ file_sigstore_trustroot_proto_rawDescOnce sync.Once
+ file_sigstore_trustroot_proto_rawDescData []byte
+)
+
+func file_sigstore_trustroot_proto_rawDescGZIP() []byte {
+ file_sigstore_trustroot_proto_rawDescOnce.Do(func() {
+ file_sigstore_trustroot_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_trustroot_proto_rawDesc), len(file_sigstore_trustroot_proto_rawDesc)))
+ })
+ return file_sigstore_trustroot_proto_rawDescData
+}
+
+var file_sigstore_trustroot_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_sigstore_trustroot_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_sigstore_trustroot_proto_goTypes = []any{
+ (ServiceSelector)(0), // 0: dev.sigstore.trustroot.v1.ServiceSelector
+ (*TransparencyLogInstance)(nil), // 1: dev.sigstore.trustroot.v1.TransparencyLogInstance
+ (*CertificateAuthority)(nil), // 2: dev.sigstore.trustroot.v1.CertificateAuthority
+ (*TrustedRoot)(nil), // 3: dev.sigstore.trustroot.v1.TrustedRoot
+ (*SigningConfig)(nil), // 4: dev.sigstore.trustroot.v1.SigningConfig
+ (*Service)(nil), // 5: dev.sigstore.trustroot.v1.Service
+ (*ServiceConfiguration)(nil), // 6: dev.sigstore.trustroot.v1.ServiceConfiguration
+ (*ClientTrustConfig)(nil), // 7: dev.sigstore.trustroot.v1.ClientTrustConfig
+ (v1.HashAlgorithm)(0), // 8: dev.sigstore.common.v1.HashAlgorithm
+ (*v1.PublicKey)(nil), // 9: dev.sigstore.common.v1.PublicKey
+ (*v1.LogId)(nil), // 10: dev.sigstore.common.v1.LogId
+ (*v1.DistinguishedName)(nil), // 11: dev.sigstore.common.v1.DistinguishedName
+ (*v1.X509CertificateChain)(nil), // 12: dev.sigstore.common.v1.X509CertificateChain
+ (*v1.TimeRange)(nil), // 13: dev.sigstore.common.v1.TimeRange
+}
+var file_sigstore_trustroot_proto_depIdxs = []int32{
+ 8, // 0: dev.sigstore.trustroot.v1.TransparencyLogInstance.hash_algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm
+ 9, // 1: dev.sigstore.trustroot.v1.TransparencyLogInstance.public_key:type_name -> dev.sigstore.common.v1.PublicKey
+ 10, // 2: dev.sigstore.trustroot.v1.TransparencyLogInstance.log_id:type_name -> dev.sigstore.common.v1.LogId
+ 10, // 3: dev.sigstore.trustroot.v1.TransparencyLogInstance.checkpoint_key_id:type_name -> dev.sigstore.common.v1.LogId
+ 11, // 4: dev.sigstore.trustroot.v1.CertificateAuthority.subject:type_name -> dev.sigstore.common.v1.DistinguishedName
+ 12, // 5: dev.sigstore.trustroot.v1.CertificateAuthority.cert_chain:type_name -> dev.sigstore.common.v1.X509CertificateChain
+ 13, // 6: dev.sigstore.trustroot.v1.CertificateAuthority.valid_for:type_name -> dev.sigstore.common.v1.TimeRange
+ 1, // 7: dev.sigstore.trustroot.v1.TrustedRoot.tlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance
+ 2, // 8: dev.sigstore.trustroot.v1.TrustedRoot.certificate_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority
+ 1, // 9: dev.sigstore.trustroot.v1.TrustedRoot.ctlogs:type_name -> dev.sigstore.trustroot.v1.TransparencyLogInstance
+ 2, // 10: dev.sigstore.trustroot.v1.TrustedRoot.timestamp_authorities:type_name -> dev.sigstore.trustroot.v1.CertificateAuthority
+ 5, // 11: dev.sigstore.trustroot.v1.SigningConfig.ca_urls:type_name -> dev.sigstore.trustroot.v1.Service
+ 5, // 12: dev.sigstore.trustroot.v1.SigningConfig.oidc_urls:type_name -> dev.sigstore.trustroot.v1.Service
+ 5, // 13: dev.sigstore.trustroot.v1.SigningConfig.rekor_tlog_urls:type_name -> dev.sigstore.trustroot.v1.Service
+ 6, // 14: dev.sigstore.trustroot.v1.SigningConfig.rekor_tlog_config:type_name -> dev.sigstore.trustroot.v1.ServiceConfiguration
+ 5, // 15: dev.sigstore.trustroot.v1.SigningConfig.tsa_urls:type_name -> dev.sigstore.trustroot.v1.Service
+ 6, // 16: dev.sigstore.trustroot.v1.SigningConfig.tsa_config:type_name -> dev.sigstore.trustroot.v1.ServiceConfiguration
+ 13, // 17: dev.sigstore.trustroot.v1.Service.valid_for:type_name -> dev.sigstore.common.v1.TimeRange
+ 0, // 18: dev.sigstore.trustroot.v1.ServiceConfiguration.selector:type_name -> dev.sigstore.trustroot.v1.ServiceSelector
+ 3, // 19: dev.sigstore.trustroot.v1.ClientTrustConfig.trusted_root:type_name -> dev.sigstore.trustroot.v1.TrustedRoot
+ 4, // 20: dev.sigstore.trustroot.v1.ClientTrustConfig.signing_config:type_name -> dev.sigstore.trustroot.v1.SigningConfig
+ 21, // [21:21] is the sub-list for method output_type
+ 21, // [21:21] is the sub-list for method input_type
+ 21, // [21:21] is the sub-list for extension type_name
+ 21, // [21:21] is the sub-list for extension extendee
+ 0, // [0:21] is the sub-list for field type_name
+}
+
+func init() { file_sigstore_trustroot_proto_init() }
+func file_sigstore_trustroot_proto_init() {
+ if File_sigstore_trustroot_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_trustroot_proto_rawDesc), len(file_sigstore_trustroot_proto_rawDesc)),
+ NumEnums: 1,
+ NumMessages: 7,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_sigstore_trustroot_proto_goTypes,
+ DependencyIndexes: file_sigstore_trustroot_proto_depIdxs,
+ EnumInfos: file_sigstore_trustroot_proto_enumTypes,
+ MessageInfos: file_sigstore_trustroot_proto_msgTypes,
+ }.Build()
+ File_sigstore_trustroot_proto = out.File
+ file_sigstore_trustroot_proto_goTypes = nil
+ file_sigstore_trustroot_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/COPYRIGHT.txt b/vendor/github.com/sigstore/rekor-tiles/v2/COPYRIGHT.txt
new file mode 100644
index 00000000000..8bb896e7f90
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/COPYRIGHT.txt
@@ -0,0 +1,13 @@
+Copyright 2025 The Sigstore Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/LICENSE b/vendor/github.com/sigstore/rekor-tiles/v2/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/internal/safeint/safeint.go b/vendor/github.com/sigstore/rekor-tiles/v2/internal/safeint/safeint.go
new file mode 100644
index 00000000000..bb28b196acb
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/internal/safeint/safeint.go
@@ -0,0 +1,68 @@
+//
+// Copyright 2025 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package safeint
+
+import (
+ "fmt"
+ "math"
+)
+
+// SafeInt64 holds equivalent int64 and uint64 integers.
+type SafeInt64 struct {
+ u uint64
+ i int64
+}
+
+// NewSafeInt64 returns a SafeInt64 struct as long as the number is either an
+// int64 or a uint64 and the value can safely be converted in either direction
+// without overflowing, i.e. it is not greater than math.MaxInt64 and not negative.
+//
+// This has implications for its usage, e.g. when used for the tree size, a new
+// tree must be created to replace the old tree before its size reaches
+// math.MaxInt64.
+//
+// This is needed for compatibility with TransparencyLogEntry
+// (https://github.com/sigstore/protobuf-specs/blob/e871d3e6fd06fa73a1524ef0efaf1452d3304cf6/protos/sigstore_rekor.proto#L86-L138).
+func NewSafeInt64(number any) (*SafeInt64, error) {
+ var result SafeInt64
+ switch n := number.(type) {
+ case uint64:
+ if n > math.MaxInt64 {
+ return nil, fmt.Errorf("exceeded max int64: %d", n)
+ }
+ result.u = n
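+ // Safe: the bounds check above guarantees n <= math.MaxInt64, so int64(n) cannot overflow.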
+ result.i = int64(n) //nolint:gosec
+ case int64:
+ if n < 0 {
+ return nil, fmt.Errorf("negative integer: %d", n)
+ }
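+ // Safe: n is non-negative here, so uint64(n) preserves the value exactly.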
+ result.u = uint64(n) //nolint:gosec
+ result.i = n
+ default:
+ return nil, fmt.Errorf("only uint64 and int64 are supported")
+ }
+ return &result, nil
+}
+
+// U returns the uint64 value of the integer.
+func (s *SafeInt64) U() uint64 {
+ return s.u
+}
+
+// I returns the int64 value of the integer.
+func (s *SafeInt64) I() int64 {
+ return s.i
+}
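+
+// Illustrative usage (a sketch, not an API beyond the functions above;
+// treeSize stands in for a hypothetical caller-supplied uint64 such as a
+// log tree size):
+//
+//	size, err := NewSafeInt64(treeSize)
+//	if err != nil {
+//		return err // the value cannot be held in both int64 and uint64
+//	}
+//	logIndex := size.I() // int64 view, e.g. for TransparencyLogEntry fields
+//	treeLen := size.U()  // uint64 view, e.g. for tile arithmetic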
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/dsse.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/dsse.pb.go
new file mode 100644
index 00000000000..e243eabb932
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/dsse.pb.go
@@ -0,0 +1,248 @@
+// Copyright 2025 The Sigstore Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v6.30.2
+// source: rekor/v2/dsse.proto
+
+package protobuf
+
+import (
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ dsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// A request to add a DSSE v0.0.2 entry to the log
+type DSSERequestV002 struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // A DSSE envelope
+ Envelope *dsse.Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
+ // All necessary verification material to verify all signatures embedded in the envelope
+ Verifiers []*Verifier `protobuf:"bytes,2,rep,name=verifiers,proto3" json:"verifiers,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DSSERequestV002) Reset() {
+ *x = DSSERequestV002{}
+ mi := &file_rekor_v2_dsse_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DSSERequestV002) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DSSERequestV002) ProtoMessage() {}
+
+func (x *DSSERequestV002) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_dsse_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DSSERequestV002.ProtoReflect.Descriptor instead.
+func (*DSSERequestV002) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_dsse_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DSSERequestV002) GetEnvelope() *dsse.Envelope {
+ if x != nil {
+ return x.Envelope
+ }
+ return nil
+}
+
+func (x *DSSERequestV002) GetVerifiers() []*Verifier {
+ if x != nil {
+ return x.Verifiers
+ }
+ return nil
+}
+
+type DSSELogEntryV002 struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The hash of the DSSE payload
+ PayloadHash *v1.HashOutput `protobuf:"bytes,1,opt,name=payloadHash,proto3" json:"payloadHash,omitempty"`
+ // Signatures and their associated verification material used to verify the payload
+ Signatures []*Signature `protobuf:"bytes,2,rep,name=signatures,proto3" json:"signatures,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DSSELogEntryV002) Reset() {
+ *x = DSSELogEntryV002{}
+ mi := &file_rekor_v2_dsse_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DSSELogEntryV002) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DSSELogEntryV002) ProtoMessage() {}
+
+func (x *DSSELogEntryV002) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_dsse_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DSSELogEntryV002.ProtoReflect.Descriptor instead.
+func (*DSSELogEntryV002) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_dsse_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *DSSELogEntryV002) GetPayloadHash() *v1.HashOutput {
+ if x != nil {
+ return x.PayloadHash
+ }
+ return nil
+}
+
+func (x *DSSELogEntryV002) GetSignatures() []*Signature {
+ if x != nil {
+ return x.Signatures
+ }
+ return nil
+}
+
+var File_rekor_v2_dsse_proto protoreflect.FileDescriptor
+
+var file_rekor_v2_dsse_proto_rawDesc = string([]byte{
+ 0x0a, 0x13, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x73, 0x73, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62,
+ 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73,
+ 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x76,
+ 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8b, 0x01,
+ 0x0a, 0x0f, 0x44, 0x53, 0x53, 0x45, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30,
+ 0x32, 0x12, 0x34, 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x69, 0x6f, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x65,
+ 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x09, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x69, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x64, 0x65, 0x76,
+ 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e,
+ 0x76, 0x32, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x09, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x22, 0xa4, 0x01, 0x0a, 0x10,
+ 0x44, 0x53, 0x53, 0x45, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x30, 0x30, 0x32,
+ 0x12, 0x49, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48,
+ 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b,
+ 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x45, 0x0a, 0x0a, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72,
+ 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x42, 0x7d, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76,
+ 0x32, 0x42, 0x0b, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x44, 0x73, 0x73, 0x65, 0x50, 0x01,
+ 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65,
+ 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, 0x02, 0x13, 0x53, 0x69,
+ 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56,
+ 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+})
+
+var (
+ file_rekor_v2_dsse_proto_rawDescOnce sync.Once
+ file_rekor_v2_dsse_proto_rawDescData []byte
+)
+
+func file_rekor_v2_dsse_proto_rawDescGZIP() []byte {
+ file_rekor_v2_dsse_proto_rawDescOnce.Do(func() {
+ file_rekor_v2_dsse_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_dsse_proto_rawDesc), len(file_rekor_v2_dsse_proto_rawDesc)))
+ })
+ return file_rekor_v2_dsse_proto_rawDescData
+}
+
+var file_rekor_v2_dsse_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_rekor_v2_dsse_proto_goTypes = []any{
+ (*DSSERequestV002)(nil), // 0: dev.sigstore.rekor.v2.DSSERequestV002
+ (*DSSELogEntryV002)(nil), // 1: dev.sigstore.rekor.v2.DSSELogEntryV002
+ (*dsse.Envelope)(nil), // 2: io.intoto.Envelope
+ (*Verifier)(nil), // 3: dev.sigstore.rekor.v2.Verifier
+ (*v1.HashOutput)(nil), // 4: dev.sigstore.common.v1.HashOutput
+ (*Signature)(nil), // 5: dev.sigstore.rekor.v2.Signature
+}
+var file_rekor_v2_dsse_proto_depIdxs = []int32{
+ 2, // 0: dev.sigstore.rekor.v2.DSSERequestV002.envelope:type_name -> io.intoto.Envelope
+ 3, // 1: dev.sigstore.rekor.v2.DSSERequestV002.verifiers:type_name -> dev.sigstore.rekor.v2.Verifier
+ 4, // 2: dev.sigstore.rekor.v2.DSSELogEntryV002.payloadHash:type_name -> dev.sigstore.common.v1.HashOutput
+ 5, // 3: dev.sigstore.rekor.v2.DSSELogEntryV002.signatures:type_name -> dev.sigstore.rekor.v2.Signature
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_rekor_v2_dsse_proto_init() }
+func file_rekor_v2_dsse_proto_init() {
+ if File_rekor_v2_dsse_proto != nil {
+ return
+ }
+ file_rekor_v2_verifier_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_dsse_proto_rawDesc), len(file_rekor_v2_dsse_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_rekor_v2_dsse_proto_goTypes,
+ DependencyIndexes: file_rekor_v2_dsse_proto_depIdxs,
+ MessageInfos: file_rekor_v2_dsse_proto_msgTypes,
+ }.Build()
+ File_rekor_v2_dsse_proto = out.File
+ file_rekor_v2_dsse_proto_goTypes = nil
+ file_rekor_v2_dsse_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/entry.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/entry.pb.go
new file mode 100644
index 00000000000..fded303ae7a
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/entry.pb.go
@@ -0,0 +1,395 @@
+// Copyright 2025 The Sigstore Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v6.30.2
+// source: rekor/v2/entry.proto
+
+package protobuf
+
+import (
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Entry is the message that is canonicalized and uploaded to the log.
+// This format is meant to be compliant with Rekor v1 entries in that
+// the `apiVersion` and `kind` can be parsed before parsing the spec.
+// Clients are expected to understand and handle the differences in the
+// contents of `spec` between Rekor v1 (a polymorphic OpenAPI definition)
+// and Rekor v2 (a typed proto definition).
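+//
+// For illustration only (field names follow the protojson encoding of this
+// message; the values here are hypothetical):
+//
+//	{"kind":"hashedrekord","apiVersion":"0.0.2","spec":{"hashedRekordV002":{...}}}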
+type Entry struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+ ApiVersion string `protobuf:"bytes,2,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
+ Spec *Spec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Entry) Reset() {
+ *x = Entry{}
+ mi := &file_rekor_v2_entry_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Entry) ProtoMessage() {}
+
+func (x *Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_entry_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Entry.ProtoReflect.Descriptor instead.
+func (*Entry) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_entry_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Entry) GetKind() string {
+ if x != nil {
+ return x.Kind
+ }
+ return ""
+}
+
+func (x *Entry) GetApiVersion() string {
+ if x != nil {
+ return x.ApiVersion
+ }
+ return ""
+}
+
+func (x *Entry) GetSpec() *Spec {
+ if x != nil {
+ return x.Spec
+ }
+ return nil
+}
+
+// Spec contains one of the Rekor entry types.
+type Spec struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Spec:
+ //
+ // *Spec_HashedRekordV002
+ // *Spec_DsseV002
+ Spec isSpec_Spec `protobuf_oneof:"spec"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Spec) Reset() {
+ *x = Spec{}
+ mi := &file_rekor_v2_entry_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Spec) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Spec) ProtoMessage() {}
+
+func (x *Spec) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_entry_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Spec.ProtoReflect.Descriptor instead.
+func (*Spec) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_entry_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Spec) GetSpec() isSpec_Spec {
+ if x != nil {
+ return x.Spec
+ }
+ return nil
+}
+
+func (x *Spec) GetHashedRekordV002() *HashedRekordLogEntryV002 {
+ if x != nil {
+ if x, ok := x.Spec.(*Spec_HashedRekordV002); ok {
+ return x.HashedRekordV002
+ }
+ }
+ return nil
+}
+
+func (x *Spec) GetDsseV002() *DSSELogEntryV002 {
+ if x != nil {
+ if x, ok := x.Spec.(*Spec_DsseV002); ok {
+ return x.DsseV002
+ }
+ }
+ return nil
+}
+
+type isSpec_Spec interface {
+ isSpec_Spec()
+}
+
+type Spec_HashedRekordV002 struct {
+ HashedRekordV002 *HashedRekordLogEntryV002 `protobuf:"bytes,1,opt,name=hashed_rekord_v002,json=hashedRekordV002,proto3,oneof"`
+}
+
+type Spec_DsseV002 struct {
+ DsseV002 *DSSELogEntryV002 `protobuf:"bytes,2,opt,name=dsse_v002,json=dsseV002,proto3,oneof"`
+}
+
+func (*Spec_HashedRekordV002) isSpec_Spec() {}
+
+func (*Spec_DsseV002) isSpec_Spec() {}
+
+// Create a new HashedRekord or DSSE
+type CreateEntryRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Spec:
+ //
+ // *CreateEntryRequest_HashedRekordRequestV002
+ // *CreateEntryRequest_DsseRequestV002
+ Spec isCreateEntryRequest_Spec `protobuf_oneof:"spec"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *CreateEntryRequest) Reset() {
+ *x = CreateEntryRequest{}
+ mi := &file_rekor_v2_entry_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateEntryRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateEntryRequest) ProtoMessage() {}
+
+func (x *CreateEntryRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_entry_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateEntryRequest.ProtoReflect.Descriptor instead.
+func (*CreateEntryRequest) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_entry_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *CreateEntryRequest) GetSpec() isCreateEntryRequest_Spec {
+ if x != nil {
+ return x.Spec
+ }
+ return nil
+}
+
+func (x *CreateEntryRequest) GetHashedRekordRequestV002() *HashedRekordRequestV002 {
+ if x != nil {
+ if x, ok := x.Spec.(*CreateEntryRequest_HashedRekordRequestV002); ok {
+ return x.HashedRekordRequestV002
+ }
+ }
+ return nil
+}
+
+func (x *CreateEntryRequest) GetDsseRequestV002() *DSSERequestV002 {
+ if x != nil {
+ if x, ok := x.Spec.(*CreateEntryRequest_DsseRequestV002); ok {
+ return x.DsseRequestV002
+ }
+ }
+ return nil
+}
+
+type isCreateEntryRequest_Spec interface {
+ isCreateEntryRequest_Spec()
+}
+
+type CreateEntryRequest_HashedRekordRequestV002 struct {
+ HashedRekordRequestV002 *HashedRekordRequestV002 `protobuf:"bytes,1,opt,name=hashed_rekord_request_v002,json=hashedRekordRequestV002,proto3,oneof"`
+}
+
+type CreateEntryRequest_DsseRequestV002 struct {
+ DsseRequestV002 *DSSERequestV002 `protobuf:"bytes,2,opt,name=dsse_request_v002,json=dsseRequestV002,proto3,oneof"`
+}
+
+func (*CreateEntryRequest_HashedRekordRequestV002) isCreateEntryRequest_Spec() {}
+
+func (*CreateEntryRequest_DsseRequestV002) isCreateEntryRequest_Spec() {}
+
+var File_rekor_v2_entry_proto protoreflect.FileDescriptor
+
+var file_rekor_v2_entry_proto_rawDesc = string([]byte{
+ 0x0a, 0x14, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+ 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x13,
+ 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x73, 0x73, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x61,
+ 0x73, 0x68, 0x65, 0x64, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x7c, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x6b, 0x69, 0x6e,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6b, 0x69,
+ 0x6e, 0x64, 0x12, 0x24, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x61, 0x70,
+ 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53,
+ 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x22, 0xc1,
+ 0x01, 0x0a, 0x04, 0x53, 0x70, 0x65, 0x63, 0x12, 0x64, 0x0a, 0x12, 0x68, 0x61, 0x73, 0x68, 0x65,
+ 0x64, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x5f, 0x76, 0x30, 0x30, 0x32, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x61, 0x73, 0x68,
+ 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x56, 0x30, 0x30, 0x32, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x10, 0x68, 0x61, 0x73,
+ 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x56, 0x30, 0x30, 0x32, 0x12, 0x4b, 0x0a,
+ 0x09, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x76, 0x30, 0x30, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e,
+ 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x53, 0x53, 0x45, 0x4c, 0x6f, 0x67,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x30, 0x30, 0x32, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00,
+ 0x52, 0x08, 0x64, 0x73, 0x73, 0x65, 0x56, 0x30, 0x30, 0x32, 0x42, 0x06, 0x0a, 0x04, 0x73, 0x70,
+ 0x65, 0x63, 0x22, 0xeb, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x72, 0x0a, 0x1a, 0x68, 0x61, 0x73,
+ 0x68, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x5f, 0x76, 0x30, 0x30, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e,
+ 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b,
+ 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f,
+ 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x48, 0x00, 0x52, 0x17, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b, 0x6f,
+ 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, 0x12, 0x59, 0x0a,
+ 0x11, 0x64, 0x73, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x30,
+ 0x30, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73,
+ 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32,
+ 0x2e, 0x44, 0x53, 0x53, 0x45, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x73, 0x73, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32, 0x42, 0x06, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63,
+ 0x42, 0x7e, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x42,
+ 0x0c, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x50, 0x01, 0x5a,
+ 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, 0x73,
+ 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
+ 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x32,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+})
+
+var (
+ file_rekor_v2_entry_proto_rawDescOnce sync.Once
+ file_rekor_v2_entry_proto_rawDescData []byte
+)
+
+func file_rekor_v2_entry_proto_rawDescGZIP() []byte {
+ file_rekor_v2_entry_proto_rawDescOnce.Do(func() {
+ file_rekor_v2_entry_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_entry_proto_rawDesc), len(file_rekor_v2_entry_proto_rawDesc)))
+ })
+ return file_rekor_v2_entry_proto_rawDescData
+}
+
+var file_rekor_v2_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_rekor_v2_entry_proto_goTypes = []any{
+ (*Entry)(nil), // 0: dev.sigstore.rekor.v2.Entry
+ (*Spec)(nil), // 1: dev.sigstore.rekor.v2.Spec
+ (*CreateEntryRequest)(nil), // 2: dev.sigstore.rekor.v2.CreateEntryRequest
+ (*HashedRekordLogEntryV002)(nil), // 3: dev.sigstore.rekor.v2.HashedRekordLogEntryV002
+ (*DSSELogEntryV002)(nil), // 4: dev.sigstore.rekor.v2.DSSELogEntryV002
+ (*HashedRekordRequestV002)(nil), // 5: dev.sigstore.rekor.v2.HashedRekordRequestV002
+ (*DSSERequestV002)(nil), // 6: dev.sigstore.rekor.v2.DSSERequestV002
+}
+var file_rekor_v2_entry_proto_depIdxs = []int32{
+ 1, // 0: dev.sigstore.rekor.v2.Entry.spec:type_name -> dev.sigstore.rekor.v2.Spec
+ 3, // 1: dev.sigstore.rekor.v2.Spec.hashed_rekord_v002:type_name -> dev.sigstore.rekor.v2.HashedRekordLogEntryV002
+ 4, // 2: dev.sigstore.rekor.v2.Spec.dsse_v002:type_name -> dev.sigstore.rekor.v2.DSSELogEntryV002
+ 5, // 3: dev.sigstore.rekor.v2.CreateEntryRequest.hashed_rekord_request_v002:type_name -> dev.sigstore.rekor.v2.HashedRekordRequestV002
+ 6, // 4: dev.sigstore.rekor.v2.CreateEntryRequest.dsse_request_v002:type_name -> dev.sigstore.rekor.v2.DSSERequestV002
+ 5, // [5:5] is the sub-list for method output_type
+ 5, // [5:5] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_rekor_v2_entry_proto_init() }
+func file_rekor_v2_entry_proto_init() {
+ if File_rekor_v2_entry_proto != nil {
+ return
+ }
+ file_rekor_v2_dsse_proto_init()
+ file_rekor_v2_hashedrekord_proto_init()
+ file_rekor_v2_entry_proto_msgTypes[1].OneofWrappers = []any{
+ (*Spec_HashedRekordV002)(nil),
+ (*Spec_DsseV002)(nil),
+ }
+ file_rekor_v2_entry_proto_msgTypes[2].OneofWrappers = []any{
+ (*CreateEntryRequest_HashedRekordRequestV002)(nil),
+ (*CreateEntryRequest_DsseRequestV002)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_entry_proto_rawDesc), len(file_rekor_v2_entry_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_rekor_v2_entry_proto_goTypes,
+ DependencyIndexes: file_rekor_v2_entry_proto_depIdxs,
+ MessageInfos: file_rekor_v2_entry_proto_msgTypes,
+ }.Build()
+ File_rekor_v2_entry_proto = out.File
+ file_rekor_v2_entry_proto_goTypes = nil
+ file_rekor_v2_entry_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/hashedrekord.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/hashedrekord.pb.go
new file mode 100644
index 00000000000..21d0612dafd
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/hashedrekord.pb.go
@@ -0,0 +1,243 @@
+// Copyright 2025 The Sigstore Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v6.30.2
+// source: rekor/v2/hashedrekord.proto
+
+package protobuf
+
+import (
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// A request to add a hashedrekord v0.0.2 entry to the log
+type HashedRekordRequestV002 struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The hashed data
+ Digest []byte `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
+ // A single signature over the hashed data with the verifier needed to validate it
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HashedRekordRequestV002) Reset() {
+ *x = HashedRekordRequestV002{}
+ mi := &file_rekor_v2_hashedrekord_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HashedRekordRequestV002) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HashedRekordRequestV002) ProtoMessage() {}
+
+func (x *HashedRekordRequestV002) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_hashedrekord_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HashedRekordRequestV002.ProtoReflect.Descriptor instead.
+func (*HashedRekordRequestV002) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_hashedrekord_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HashedRekordRequestV002) GetDigest() []byte {
+ if x != nil {
+ return x.Digest
+ }
+ return nil
+}
+
+func (x *HashedRekordRequestV002) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type HashedRekordLogEntryV002 struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The hashed data
+ Data *v1.HashOutput `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+ // A single signature over the hashed data with the verifier needed to validate it
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HashedRekordLogEntryV002) Reset() {
+ *x = HashedRekordLogEntryV002{}
+ mi := &file_rekor_v2_hashedrekord_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HashedRekordLogEntryV002) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HashedRekordLogEntryV002) ProtoMessage() {}
+
+func (x *HashedRekordLogEntryV002) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_hashedrekord_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HashedRekordLogEntryV002.ProtoReflect.Descriptor instead.
+func (*HashedRekordLogEntryV002) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_hashedrekord_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HashedRekordLogEntryV002) GetData() *v1.HashOutput {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *HashedRekordLogEntryV002) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+var File_rekor_v2_hashedrekord_proto protoreflect.FileDescriptor
+
+var file_rekor_v2_hashedrekord_proto_rawDesc = string([]byte{
+ 0x0a, 0x1b, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x65,
+ 0x64, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64,
+ 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f,
+ 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
+ 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x72, 0x65,
+ 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7b, 0x0a, 0x17, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52,
+ 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x30, 0x30, 0x32,
+ 0x12, 0x1b, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e,
+ 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x22, 0x9c, 0x01, 0x0a, 0x18, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x6b,
+ 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x56, 0x30, 0x30, 0x32, 0x12,
+ 0x3b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75,
+ 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x43, 0x0a, 0x09,
+ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72,
+ 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x42, 0x85, 0x01, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76,
+ 0x32, 0x42, 0x13, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x48, 0x61, 0x73, 0x68, 0x65, 0x64,
+ 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x64, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65,
+ 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67,
+ 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0xea, 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a,
+ 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+})
+
+var (
+ file_rekor_v2_hashedrekord_proto_rawDescOnce sync.Once
+ file_rekor_v2_hashedrekord_proto_rawDescData []byte
+)
+
+func file_rekor_v2_hashedrekord_proto_rawDescGZIP() []byte {
+ file_rekor_v2_hashedrekord_proto_rawDescOnce.Do(func() {
+ file_rekor_v2_hashedrekord_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_hashedrekord_proto_rawDesc), len(file_rekor_v2_hashedrekord_proto_rawDesc)))
+ })
+ return file_rekor_v2_hashedrekord_proto_rawDescData
+}
+
+var file_rekor_v2_hashedrekord_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_rekor_v2_hashedrekord_proto_goTypes = []any{
+ (*HashedRekordRequestV002)(nil), // 0: dev.sigstore.rekor.v2.HashedRekordRequestV002
+ (*HashedRekordLogEntryV002)(nil), // 1: dev.sigstore.rekor.v2.HashedRekordLogEntryV002
+ (*Signature)(nil), // 2: dev.sigstore.rekor.v2.Signature
+ (*v1.HashOutput)(nil), // 3: dev.sigstore.common.v1.HashOutput
+}
+var file_rekor_v2_hashedrekord_proto_depIdxs = []int32{
+ 2, // 0: dev.sigstore.rekor.v2.HashedRekordRequestV002.signature:type_name -> dev.sigstore.rekor.v2.Signature
+ 3, // 1: dev.sigstore.rekor.v2.HashedRekordLogEntryV002.data:type_name -> dev.sigstore.common.v1.HashOutput
+ 2, // 2: dev.sigstore.rekor.v2.HashedRekordLogEntryV002.signature:type_name -> dev.sigstore.rekor.v2.Signature
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_rekor_v2_hashedrekord_proto_init() }
+func file_rekor_v2_hashedrekord_proto_init() {
+ if File_rekor_v2_hashedrekord_proto != nil {
+ return
+ }
+ file_rekor_v2_verifier_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_hashedrekord_proto_rawDesc), len(file_rekor_v2_hashedrekord_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_rekor_v2_hashedrekord_proto_goTypes,
+ DependencyIndexes: file_rekor_v2_hashedrekord_proto_depIdxs,
+ MessageInfos: file_rekor_v2_hashedrekord_proto_msgTypes,
+ }.Build()
+ File_rekor_v2_hashedrekord_proto = out.File
+ file_rekor_v2_hashedrekord_proto_goTypes = nil
+ file_rekor_v2_hashedrekord_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.go
new file mode 100644
index 00000000000..f261cdc3596
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.go
@@ -0,0 +1,287 @@
+// Copyright 2025 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v6.30.2
+// source: rekor/v2/rekor_service.proto
+
+package protobuf
+
+import (
+ _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ httpbody "google.golang.org/genproto/googleapis/api/httpbody"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Request for a full or partial tile (see https://github.com/C2SP/C2SP/blob/main/tlog-tiles.md#merkle-tree)
+type TileRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ L uint32 `protobuf:"varint,1,opt,name=L,proto3" json:"L,omitempty"`
+ // N must be an index encoded as zero-padded 3-digit path elements, e.g. "x123/x456/789",
+ // and may end with ".p/<W>", where <W> is a uint8
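+ // For example, "x001/x234/067.p/8" (an illustrative value) would address a
+ // partial tile of width 8; see the tlog-tiles spec linked above.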
+ N string `protobuf:"bytes,2,opt,name=N,proto3" json:"N,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *TileRequest) Reset() {
+ *x = TileRequest{}
+ mi := &file_rekor_v2_rekor_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TileRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TileRequest) ProtoMessage() {}
+
+func (x *TileRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_rekor_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TileRequest.ProtoReflect.Descriptor instead.
+func (*TileRequest) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_rekor_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TileRequest) GetL() uint32 {
+ if x != nil {
+ return x.L
+ }
+ return 0
+}
+
+func (x *TileRequest) GetN() string {
+ if x != nil {
+ return x.N
+ }
+ return ""
+}
+
+// Request for a full or partial entry bundle (see https://github.com/C2SP/C2SP/blob/main/tlog-tiles.md#log-entries)
+type EntryBundleRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // N must be an index encoded as zero-padded 3-digit path elements, e.g. "x123/x456/789",
+ // and may end with ".p/<W>", where <W> is a uint8
+ N string `protobuf:"bytes,1,opt,name=N,proto3" json:"N,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EntryBundleRequest) Reset() {
+ *x = EntryBundleRequest{}
+ mi := &file_rekor_v2_rekor_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EntryBundleRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EntryBundleRequest) ProtoMessage() {}
+
+func (x *EntryBundleRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_rekor_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EntryBundleRequest.ProtoReflect.Descriptor instead.
+func (*EntryBundleRequest) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_rekor_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EntryBundleRequest) GetN() string {
+ if x != nil {
+ return x.N
+ }
+ return ""
+}
+
+var File_rekor_v2_rekor_service_proto protoreflect.FileDescriptor
+
+var file_rekor_v2_rekor_service_proto_rawDesc = string([]byte{
+ 0x0a, 0x1c, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15,
+ 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b,
+ 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f,
+ 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x73, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x14, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x72,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x29, 0x0a, 0x0b, 0x54, 0x69, 0x6c, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x01, 0x4c, 0x12, 0x0c, 0x0a, 0x01, 0x4e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x01, 0x4e, 0x22, 0x22, 0x0a, 0x12, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x4e, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x01, 0x4e, 0x32, 0xc8, 0x03, 0x0a, 0x05, 0x52, 0x65, 0x6b, 0x6f, 0x72,
+ 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x29, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e,
+ 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x64, 0x65,
+ 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72,
+ 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x63, 0x79,
+ 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18,
+ 0x3a, 0x01, 0x2a, 0x22, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x6c, 0x6f, 0x67,
+ 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x54,
+ 0x69, 0x6c, 0x65, 0x12, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x6c, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x1f, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x74,
+ 0x69, 0x6c, 0x65, 0x2f, 0x7b, 0x4c, 0x7d, 0x2f, 0x7b, 0x4e, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x76,
+ 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65,
+ 0x12, 0x29, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e,
+ 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x75,
+ 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64,
+ 0x79, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x12, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f,
+ 0x76, 0x32, 0x2f, 0x74, 0x69, 0x6c, 0x65, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x2f,
+ 0x7b, 0x4e, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x59, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a,
+ 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74,
+ 0x70, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e,
+ 0x74, 0x42, 0xc0, 0x03, 0x92, 0x41, 0xbc, 0x02, 0x12, 0xbc, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x6b,
+ 0x6f, 0x72, 0x20, 0x76, 0x32, 0x22, 0x5a, 0x0a, 0x10, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x20, 0x76,
+ 0x32, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x68, 0x74, 0x74, 0x70, 0x73,
+ 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69,
+ 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c,
+ 0x65, 0x73, 0x1a, 0x1d, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2d, 0x64, 0x65, 0x76,
+ 0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2a, 0x4f, 0x0a, 0x12, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, 0x20, 0x4c, 0x69, 0x63, 0x65,
+ 0x6e, 0x73, 0x65, 0x20, 0x32, 0x2e, 0x30, 0x12, 0x39, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
+ 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74, 0x69, 0x6c, 0x65, 0x73,
+ 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x6d, 0x61, 0x69, 0x6e, 0x2f, 0x4c, 0x49, 0x43, 0x45, 0x4e,
+ 0x53, 0x45, 0x32, 0x03, 0x32, 0x2e, 0x30, 0x1a, 0x14, 0x2a, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72,
+ 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x64, 0x65, 0x76, 0x2a, 0x01, 0x01,
+ 0x32, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6a, 0x73,
+ 0x6f, 0x6e, 0x3a, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f,
+ 0x6a, 0x73, 0x6f, 0x6e, 0x72, 0x3e, 0x0a, 0x13, 0x4d, 0x6f, 0x72, 0x65, 0x20, 0x61, 0x62, 0x6f,
+ 0x75, 0x74, 0x20, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x20, 0x76, 0x32, 0x12, 0x27, 0x68, 0x74, 0x74,
+ 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74,
+ 0x69, 0x6c, 0x65, 0x73, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76,
+ 0x32, 0x42, 0x0e, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d, 0x74,
+ 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea, 0x02,
+ 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f, 0x72,
+ 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+})
+
+var (
+ file_rekor_v2_rekor_service_proto_rawDescOnce sync.Once
+ file_rekor_v2_rekor_service_proto_rawDescData []byte
+)
+
+func file_rekor_v2_rekor_service_proto_rawDescGZIP() []byte {
+ file_rekor_v2_rekor_service_proto_rawDescOnce.Do(func() {
+ file_rekor_v2_rekor_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_rekor_service_proto_rawDesc), len(file_rekor_v2_rekor_service_proto_rawDesc)))
+ })
+ return file_rekor_v2_rekor_service_proto_rawDescData
+}
+
+var file_rekor_v2_rekor_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_rekor_v2_rekor_service_proto_goTypes = []any{
+ (*TileRequest)(nil), // 0: dev.sigstore.rekor.v2.TileRequest
+ (*EntryBundleRequest)(nil), // 1: dev.sigstore.rekor.v2.EntryBundleRequest
+ (*CreateEntryRequest)(nil), // 2: dev.sigstore.rekor.v2.CreateEntryRequest
+ (*emptypb.Empty)(nil), // 3: google.protobuf.Empty
+ (*v1.TransparencyLogEntry)(nil), // 4: dev.sigstore.rekor.v1.TransparencyLogEntry
+ (*httpbody.HttpBody)(nil), // 5: google.api.HttpBody
+}
+var file_rekor_v2_rekor_service_proto_depIdxs = []int32{
+ 2, // 0: dev.sigstore.rekor.v2.Rekor.CreateEntry:input_type -> dev.sigstore.rekor.v2.CreateEntryRequest
+ 0, // 1: dev.sigstore.rekor.v2.Rekor.GetTile:input_type -> dev.sigstore.rekor.v2.TileRequest
+ 1, // 2: dev.sigstore.rekor.v2.Rekor.GetEntryBundle:input_type -> dev.sigstore.rekor.v2.EntryBundleRequest
+ 3, // 3: dev.sigstore.rekor.v2.Rekor.GetCheckpoint:input_type -> google.protobuf.Empty
+ 4, // 4: dev.sigstore.rekor.v2.Rekor.CreateEntry:output_type -> dev.sigstore.rekor.v1.TransparencyLogEntry
+ 5, // 5: dev.sigstore.rekor.v2.Rekor.GetTile:output_type -> google.api.HttpBody
+ 5, // 6: dev.sigstore.rekor.v2.Rekor.GetEntryBundle:output_type -> google.api.HttpBody
+ 5, // 7: dev.sigstore.rekor.v2.Rekor.GetCheckpoint:output_type -> google.api.HttpBody
+ 4, // [4:8] is the sub-list for method output_type
+ 0, // [0:4] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_rekor_v2_rekor_service_proto_init() }
+func file_rekor_v2_rekor_service_proto_init() {
+ if File_rekor_v2_rekor_service_proto != nil {
+ return
+ }
+ file_rekor_v2_entry_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_rekor_service_proto_rawDesc), len(file_rekor_v2_rekor_service_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_rekor_v2_rekor_service_proto_goTypes,
+ DependencyIndexes: file_rekor_v2_rekor_service_proto_depIdxs,
+ MessageInfos: file_rekor_v2_rekor_service_proto_msgTypes,
+ }.Build()
+ File_rekor_v2_rekor_service_proto = out.File
+ file_rekor_v2_rekor_service_proto_goTypes = nil
+ file_rekor_v2_rekor_service_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.gw.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.gw.go
new file mode 100644
index 00000000000..e0d4857bee4
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service.pb.gw.go
@@ -0,0 +1,381 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: rekor/v2/rekor_service.proto
+
+/*
+Package protobuf is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package protobuf
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// Suppress "imported and not used" errors
+var (
+ _ codes.Code
+ _ io.Reader
+ _ status.Status
+ _ = errors.New
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = metadata.Join
+)
+
+func request_Rekor_CreateEntry_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq CreateEntryRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.CreateEntry(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+}
+
+func local_request_Rekor_CreateEntry_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq CreateEntryRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.CreateEntry(ctx, &protoReq)
+ return msg, metadata, err
+}
+
+func request_Rekor_GetTile_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq TileRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ io.Copy(io.Discard, req.Body)
+ val, ok := pathParams["L"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "L")
+ }
+ protoReq.L, err = runtime.Uint32(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "L", err)
+ }
+ val, ok = pathParams["N"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N")
+ }
+ protoReq.N, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err)
+ }
+ msg, err := client.GetTile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+}
+
+func local_request_Rekor_GetTile_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq TileRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ val, ok := pathParams["L"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "L")
+ }
+ protoReq.L, err = runtime.Uint32(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "L", err)
+ }
+ val, ok = pathParams["N"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N")
+ }
+ protoReq.N, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err)
+ }
+ msg, err := server.GetTile(ctx, &protoReq)
+ return msg, metadata, err
+}
+
+func request_Rekor_GetEntryBundle_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq EntryBundleRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ io.Copy(io.Discard, req.Body)
+ val, ok := pathParams["N"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N")
+ }
+ protoReq.N, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err)
+ }
+ msg, err := client.GetEntryBundle(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+}
+
+func local_request_Rekor_GetEntryBundle_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq EntryBundleRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ val, ok := pathParams["N"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "N")
+ }
+ protoReq.N, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "N", err)
+ }
+ msg, err := server.GetEntryBundle(ctx, &protoReq)
+ return msg, metadata, err
+}
+
+func request_Rekor_GetCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, client RekorClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq emptypb.Empty
+ metadata runtime.ServerMetadata
+ )
+ io.Copy(io.Discard, req.Body)
+ msg, err := client.GetCheckpoint(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+}
+
+func local_request_Rekor_GetCheckpoint_0(ctx context.Context, marshaler runtime.Marshaler, server RekorServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq emptypb.Empty
+ metadata runtime.ServerMetadata
+ )
+ msg, err := server.GetCheckpoint(ctx, &protoReq)
+ return msg, metadata, err
+}
+
+// RegisterRekorHandlerServer registers the http handlers for service Rekor to "mux".
+// UnaryRPC :call RekorServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterRekorHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterRekorHandlerServer(ctx context.Context, mux *runtime.ServeMux, server RekorServer) error {
+ mux.Handle(http.MethodPost, pattern_Rekor_CreateEntry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/CreateEntry", runtime.WithHTTPPathPattern("/api/v2/log/entries"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Rekor_CreateEntry_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Rekor_CreateEntry_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_Rekor_GetTile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetTile", runtime.WithHTTPPathPattern("/api/v2/tile/{L}/{N=**}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Rekor_GetTile_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Rekor_GetTile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_Rekor_GetEntryBundle_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetEntryBundle", runtime.WithHTTPPathPattern("/api/v2/tile/entries/{N=**}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Rekor_GetEntryBundle_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Rekor_GetEntryBundle_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_Rekor_GetCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetCheckpoint", runtime.WithHTTPPathPattern("/api/v2/checkpoint"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Rekor_GetCheckpoint_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Rekor_GetCheckpoint_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ return nil
+}
+
+// RegisterRekorHandlerFromEndpoint is same as RegisterRekorHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterRekorHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterRekorHandler(ctx, mux, conn)
+}
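+
+// Wiring sketch (illustrative only; the addresses and credentials are
+// assumptions, and "log" and the insecure-credentials package are assumed
+// to be imported): serve the REST/JSON gateway in front of a gRPC endpoint.
+//
+//	ctx := context.Background()
+//	mux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
+//	if err := RegisterRekorHandlerFromEndpoint(ctx, mux, "localhost:50051", opts); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(http.ListenAndServe(":8080", mux))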
+
+// RegisterRekorHandler registers the http handlers for service Rekor to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterRekorHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterRekorHandlerClient(ctx, mux, NewRekorClient(conn))
+}
+
+// RegisterRekorHandlerClient registers the http handlers for service Rekor
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "RekorClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "RekorClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "RekorClient" to call the correct interceptors. This client ignores the HTTP middlewares.
+func RegisterRekorHandlerClient(ctx context.Context, mux *runtime.ServeMux, client RekorClient) error {
+ mux.Handle(http.MethodPost, pattern_Rekor_CreateEntry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/CreateEntry", runtime.WithHTTPPathPattern("/api/v2/log/entries"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Rekor_CreateEntry_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Rekor_CreateEntry_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_Rekor_GetTile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetTile", runtime.WithHTTPPathPattern("/api/v2/tile/{L}/{N=**}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Rekor_GetTile_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Rekor_GetTile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_Rekor_GetEntryBundle_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetEntryBundle", runtime.WithHTTPPathPattern("/api/v2/tile/entries/{N=**}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Rekor_GetEntryBundle_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Rekor_GetEntryBundle_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_Rekor_GetCheckpoint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dev.sigstore.rekor.v2.Rekor/GetCheckpoint", runtime.WithHTTPPathPattern("/api/v2/checkpoint"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Rekor_GetCheckpoint_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Rekor_GetCheckpoint_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_Rekor_CreateEntry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v2", "log", "entries"}, ""))
+ pattern_Rekor_GetTile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "v2", "tile", "L", "N"}, ""))
+ pattern_Rekor_GetEntryBundle_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "v2", "tile", "entries", "N"}, ""))
+ pattern_Rekor_GetCheckpoint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v2", "checkpoint"}, ""))
+)
+
+var (
+ forward_Rekor_CreateEntry_0 = runtime.ForwardResponseMessage
+ forward_Rekor_GetTile_0 = runtime.ForwardResponseMessage
+ forward_Rekor_GetEntryBundle_0 = runtime.ForwardResponseMessage
+ forward_Rekor_GetCheckpoint_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service_grpc.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service_grpc.pb.go
new file mode 100644
index 00000000000..807d630524a
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/rekor_service_grpc.pb.go
@@ -0,0 +1,266 @@
+// Copyright 2025 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v6.30.2
+// source: rekor/v2/rekor_service.proto
+
+package protobuf
+
+import (
+ context "context"
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
+ httpbody "google.golang.org/genproto/googleapis/api/httpbody"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ Rekor_CreateEntry_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/CreateEntry"
+ Rekor_GetTile_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/GetTile"
+ Rekor_GetEntryBundle_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/GetEntryBundle"
+ Rekor_GetCheckpoint_FullMethodName = "/dev.sigstore.rekor.v2.Rekor/GetCheckpoint"
+)
+
+// RekorClient is the client API for Rekor service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+//
+// A service for sigstore clients to connect to in order to create log entries,
+// and for log monitors and witnesses to audit/inspect the log
+type RekorClient interface {
+ // Create an entry in the log
+ CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*v1.TransparencyLogEntry, error)
+ // Get a tile from the log
+ GetTile(ctx context.Context, in *TileRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error)
+ // Get an entry bundle from the log
+ GetEntryBundle(ctx context.Context, in *EntryBundleRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error)
+ // Get a checkpoint from the log
+ GetCheckpoint(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*httpbody.HttpBody, error)
+}
+
+type rekorClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewRekorClient(cc grpc.ClientConnInterface) RekorClient {
+ return &rekorClient{cc}
+}
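+
+// Usage sketch (illustrative only; the address and credentials are
+// assumptions, and "fmt", "log", and the insecure-credentials package are
+// assumed to be imported): dial the log and fetch its latest checkpoint.
+//
+//	conn, err := grpc.NewClient("localhost:50051",
+//		grpc.WithTransportCredentials(insecure.NewCredentials()))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer conn.Close()
+//	client := NewRekorClient(conn)
+//	body, err := client.GetCheckpoint(context.Background(), &emptypb.Empty{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s", body.GetData()) // signed checkpoint note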
+
+func (c *rekorClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*v1.TransparencyLogEntry, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(v1.TransparencyLogEntry)
+ err := c.cc.Invoke(ctx, Rekor_CreateEntry_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *rekorClient) GetTile(ctx context.Context, in *TileRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(httpbody.HttpBody)
+ err := c.cc.Invoke(ctx, Rekor_GetTile_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *rekorClient) GetEntryBundle(ctx context.Context, in *EntryBundleRequest, opts ...grpc.CallOption) (*httpbody.HttpBody, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(httpbody.HttpBody)
+ err := c.cc.Invoke(ctx, Rekor_GetEntryBundle_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *rekorClient) GetCheckpoint(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*httpbody.HttpBody, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(httpbody.HttpBody)
+ err := c.cc.Invoke(ctx, Rekor_GetCheckpoint_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// RekorServer is the server API for Rekor service.
+// All implementations must embed UnimplementedRekorServer
+// for forward compatibility.
+//
+// A service for sigstore clients to connect to in order to create log entries,
+// and for log monitors and witnesses to audit/inspect the log
+type RekorServer interface {
+ // Create an entry in the log
+ CreateEntry(context.Context, *CreateEntryRequest) (*v1.TransparencyLogEntry, error)
+ // Get a tile from the log
+ GetTile(context.Context, *TileRequest) (*httpbody.HttpBody, error)
+ // Get an entry bundle from the log
+ GetEntryBundle(context.Context, *EntryBundleRequest) (*httpbody.HttpBody, error)
+ // Get a checkpoint from the log
+ GetCheckpoint(context.Context, *emptypb.Empty) (*httpbody.HttpBody, error)
+ mustEmbedUnimplementedRekorServer()
+}
+
+// UnimplementedRekorServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedRekorServer struct{}
+
+func (UnimplementedRekorServer) CreateEntry(context.Context, *CreateEntryRequest) (*v1.TransparencyLogEntry, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateEntry not implemented")
+}
+func (UnimplementedRekorServer) GetTile(context.Context, *TileRequest) (*httpbody.HttpBody, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetTile not implemented")
+}
+func (UnimplementedRekorServer) GetEntryBundle(context.Context, *EntryBundleRequest) (*httpbody.HttpBody, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetEntryBundle not implemented")
+}
+func (UnimplementedRekorServer) GetCheckpoint(context.Context, *emptypb.Empty) (*httpbody.HttpBody, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetCheckpoint not implemented")
+}
+func (UnimplementedRekorServer) mustEmbedUnimplementedRekorServer() {}
+func (UnimplementedRekorServer) testEmbeddedByValue() {}
+
+// UnsafeRekorServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to RekorServer will
+// result in compilation errors.
+type UnsafeRekorServer interface {
+ mustEmbedUnimplementedRekorServer()
+}
+
+func RegisterRekorServer(s grpc.ServiceRegistrar, srv RekorServer) {
+ // If the following call panics, it indicates UnimplementedRekorServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&Rekor_ServiceDesc, srv)
+}
+
+func _Rekor_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateEntryRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RekorServer).CreateEntry(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Rekor_CreateEntry_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RekorServer).CreateEntry(ctx, req.(*CreateEntryRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Rekor_GetTile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(TileRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RekorServer).GetTile(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Rekor_GetTile_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RekorServer).GetTile(ctx, req.(*TileRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Rekor_GetEntryBundle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(EntryBundleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RekorServer).GetEntryBundle(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Rekor_GetEntryBundle_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RekorServer).GetEntryBundle(ctx, req.(*EntryBundleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Rekor_GetCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(emptypb.Empty)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RekorServer).GetCheckpoint(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Rekor_GetCheckpoint_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RekorServer).GetCheckpoint(ctx, req.(*emptypb.Empty))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Rekor_ServiceDesc is the grpc.ServiceDesc for Rekor service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Rekor_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "dev.sigstore.rekor.v2.Rekor",
+ HandlerType: (*RekorServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "CreateEntry",
+ Handler: _Rekor_CreateEntry_Handler,
+ },
+ {
+ MethodName: "GetTile",
+ Handler: _Rekor_GetTile_Handler,
+ },
+ {
+ MethodName: "GetEntryBundle",
+ Handler: _Rekor_GetEntryBundle_Handler,
+ },
+ {
+ MethodName: "GetCheckpoint",
+ Handler: _Rekor_GetCheckpoint_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rekor/v2/rekor_service.proto",
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/verifier.pb.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/verifier.pb.go
new file mode 100644
index 00000000000..8b9b4f1f81e
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf/verifier.pb.go
@@ -0,0 +1,338 @@
+// Copyright 2025 The Sigstore Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v6.30.2
+// source: rekor/v2/verifier.proto
+
+package protobuf
+
+import (
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// PublicKey contains an encoded public key
+type PublicKey struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // DER-encoded public key
+ RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *PublicKey) Reset() {
+ *x = PublicKey{}
+ mi := &file_rekor_v2_verifier_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *PublicKey) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublicKey) ProtoMessage() {}
+
+func (x *PublicKey) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_verifier_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead.
+func (*PublicKey) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_verifier_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *PublicKey) GetRawBytes() []byte {
+ if x != nil {
+ return x.RawBytes
+ }
+ return nil
+}
+
+// Either a public key or an X.509 certificate with an embedded public key
+type Verifier struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Verifier:
+ //
+ // *Verifier_PublicKey
+ // *Verifier_X509Certificate
+ Verifier isVerifier_Verifier `protobuf_oneof:"verifier"`
+ // Key encoding and signature algorithm to use for this key
+ KeyDetails v1.PublicKeyDetails `protobuf:"varint,3,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Verifier) Reset() {
+ *x = Verifier{}
+ mi := &file_rekor_v2_verifier_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Verifier) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Verifier) ProtoMessage() {}
+
+func (x *Verifier) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_verifier_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Verifier.ProtoReflect.Descriptor instead.
+func (*Verifier) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_verifier_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Verifier) GetVerifier() isVerifier_Verifier {
+ if x != nil {
+ return x.Verifier
+ }
+ return nil
+}
+
+func (x *Verifier) GetPublicKey() *PublicKey {
+ if x != nil {
+ if x, ok := x.Verifier.(*Verifier_PublicKey); ok {
+ return x.PublicKey
+ }
+ }
+ return nil
+}
+
+func (x *Verifier) GetX509Certificate() *v1.X509Certificate {
+ if x != nil {
+ if x, ok := x.Verifier.(*Verifier_X509Certificate); ok {
+ return x.X509Certificate
+ }
+ }
+ return nil
+}
+
+func (x *Verifier) GetKeyDetails() v1.PublicKeyDetails {
+ if x != nil {
+ return x.KeyDetails
+ }
+ return v1.PublicKeyDetails(0)
+}
+
+type isVerifier_Verifier interface {
+ isVerifier_Verifier()
+}
+
+type Verifier_PublicKey struct {
+ // DER-encoded public key. Encoding method is specified by the key_details attribute
+ PublicKey *PublicKey `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3,oneof"`
+}
+
+type Verifier_X509Certificate struct {
+ // DER-encoded certificate
+ X509Certificate *v1.X509Certificate `protobuf:"bytes,2,opt,name=x509_certificate,json=x509Certificate,proto3,oneof"`
+}
+
+func (*Verifier_PublicKey) isVerifier_Verifier() {}
+
+func (*Verifier_X509Certificate) isVerifier_Verifier() {}
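+
+// Construction sketch (illustrative; the DER bytes variable and the chosen
+// key-details value are assumptions): populate the oneof with a raw public key.
+//
+//	v := &Verifier{
+//		Verifier:   &Verifier_PublicKey{PublicKey: &PublicKey{RawBytes: derBytes}},
+//		KeyDetails: v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256,
+//	}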
+
+// A signature and an associated verifier
+type Signature struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ Verifier *Verifier `protobuf:"bytes,2,opt,name=verifier,proto3" json:"verifier,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Signature) Reset() {
+ *x = Signature{}
+ mi := &file_rekor_v2_verifier_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Signature) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Signature) ProtoMessage() {}
+
+func (x *Signature) ProtoReflect() protoreflect.Message {
+ mi := &file_rekor_v2_verifier_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
+func (*Signature) Descriptor() ([]byte, []int) {
+ return file_rekor_v2_verifier_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Signature) GetContent() []byte {
+ if x != nil {
+ return x.Content
+ }
+ return nil
+}
+
+func (x *Signature) GetVerifier() *Verifier {
+ if x != nil {
+ return x.Verifier
+ }
+ return nil
+}
+
+var File_rekor_v2_verifier_proto protoreflect.FileDescriptor
+
+var file_rekor_v2_verifier_proto_rawDesc = string([]byte{
+ 0x0a, 0x17, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x65, 0x76, 0x2e, 0x73,
+ 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32,
+ 0x1a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2d, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72,
+ 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x08, 0x56, 0x65, 0x72, 0x69,
+ 0x66, 0x69, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73,
+ 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32,
+ 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x48,
+ 0x00, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x59, 0x0a, 0x10,
+ 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+ 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x48, 0x00, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x64,
+ 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x64,
+ 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44,
+ 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x6b, 0x65, 0x79,
+ 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x69, 0x65, 0x72, 0x22, 0x6c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x12, 0x1d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12,
+ 0x40, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1f, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65,
+ 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65,
+ 0x72, 0x42, 0x81, 0x01, 0x0a, 0x1b, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2e, 0x76,
+ 0x32, 0x42, 0x0f, 0x52, 0x65, 0x6b, 0x6f, 0x72, 0x56, 0x32, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x65, 0x72, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x2d,
+ 0x74, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0xea,
+ 0x02, 0x13, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x52, 0x65, 0x6b, 0x6f,
+ 0x72, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+})
+
+var (
+ file_rekor_v2_verifier_proto_rawDescOnce sync.Once
+ file_rekor_v2_verifier_proto_rawDescData []byte
+)
+
+func file_rekor_v2_verifier_proto_rawDescGZIP() []byte {
+ file_rekor_v2_verifier_proto_rawDescOnce.Do(func() {
+ file_rekor_v2_verifier_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rekor_v2_verifier_proto_rawDesc), len(file_rekor_v2_verifier_proto_rawDesc)))
+ })
+ return file_rekor_v2_verifier_proto_rawDescData
+}
+
+var file_rekor_v2_verifier_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_rekor_v2_verifier_proto_goTypes = []any{
+ (*PublicKey)(nil), // 0: dev.sigstore.rekor.v2.PublicKey
+ (*Verifier)(nil), // 1: dev.sigstore.rekor.v2.Verifier
+ (*Signature)(nil), // 2: dev.sigstore.rekor.v2.Signature
+ (*v1.X509Certificate)(nil), // 3: dev.sigstore.common.v1.X509Certificate
+ (v1.PublicKeyDetails)(0), // 4: dev.sigstore.common.v1.PublicKeyDetails
+}
+var file_rekor_v2_verifier_proto_depIdxs = []int32{
+ 0, // 0: dev.sigstore.rekor.v2.Verifier.public_key:type_name -> dev.sigstore.rekor.v2.PublicKey
+ 3, // 1: dev.sigstore.rekor.v2.Verifier.x509_certificate:type_name -> dev.sigstore.common.v1.X509Certificate
+ 4, // 2: dev.sigstore.rekor.v2.Verifier.key_details:type_name -> dev.sigstore.common.v1.PublicKeyDetails
+ 1, // 3: dev.sigstore.rekor.v2.Signature.verifier:type_name -> dev.sigstore.rekor.v2.Verifier
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_rekor_v2_verifier_proto_init() }
+func file_rekor_v2_verifier_proto_init() {
+ if File_rekor_v2_verifier_proto != nil {
+ return
+ }
+ file_rekor_v2_verifier_proto_msgTypes[1].OneofWrappers = []any{
+ (*Verifier_PublicKey)(nil),
+ (*Verifier_X509Certificate)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_rekor_v2_verifier_proto_rawDesc), len(file_rekor_v2_verifier_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_rekor_v2_verifier_proto_goTypes,
+ DependencyIndexes: file_rekor_v2_verifier_proto_depIdxs,
+ MessageInfos: file_rekor_v2_verifier_proto_msgTypes,
+ }.Build()
+ File_rekor_v2_verifier_proto = out.File
+ file_rekor_v2_verifier_proto_goTypes = nil
+ file_rekor_v2_verifier_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/note/note.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/note/note.go
new file mode 100644
index 00000000000..03bd6b839cf
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/note/note.go
@@ -0,0 +1,219 @@
+/*
+Copyright 2025 The Sigstore Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Heavily borrowed from https://gist.githubusercontent.com/AlCutter/c6c69076dc55652e2d278900ccc1a5e7/raw/aac2bafc17a8efa162bd99b4453070b724779307/ecdsa_note.go - thanks, Al
+
+package note
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/binary"
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/sigstore/sigstore/pkg/signature/options"
+ "golang.org/x/mod/sumdb/note"
+)
+
+const (
+ algEd25519 = 1
+ algUndef = 255
+ rsaID = "PKIX-RSA-PKCS#1v1.5"
+)
+
+// noteSigner uses an arbitrary sigstore signer to implement golang.org/x/mod/sumdb/note.Signer,
+// which is used in Tessera to sign checkpoints in the signed notes format
+// (https://github.com/C2SP/C2SP/blob/main/signed-note.md).
+type noteSigner struct {
+ name string
+ hash uint32
+ sign func(msg []byte) ([]byte, error)
+}
+
+// Name returns the server name associated with the key.
+func (n *noteSigner) Name() string {
+ return n.name
+}
+
+// KeyHash returns the key hash.
+func (n *noteSigner) KeyHash() uint32 {
+ return n.hash
+}
+
+// Sign returns a signature for the given message.
+func (n *noteSigner) Sign(msg []byte) ([]byte, error) {
+ return n.sign(msg)
+}
+
+type noteVerifier struct {
+ name string
+ hash uint32
+ verify func(msg, sig []byte) bool
+}
+
+// Name implements note.Verifier.
+func (n *noteVerifier) Name() string {
+ return n.name
+}
+
+// KeyHash implements note.Verifier.
+func (n *noteVerifier) KeyHash() uint32 {
+ return n.hash
+}
+
+// Verify implements note.Verifier.
+func (n *noteVerifier) Verify(msg, sig []byte) bool {
+ return n.verify(msg, sig)
+}
+
+// isValidName reports whether the name conforms to the spec for the origin string of the note text
+// as defined in https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md#note-text.
+func isValidName(name string) bool {
+ return name != "" && utf8.ValidString(name) && strings.IndexFunc(name, unicode.IsSpace) < 0 && !strings.Contains(name, "+")
+}
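+
+// For example, "rekor.sigstore.dev" is a valid origin, while "log name"
+// (contains whitespace) and "log+v2" (contains '+') are not.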
+
+// genConformantKeyHash generates a truncated (4-byte) and non-truncated
+// identifier for typical (non-ECDSA) keys.
+func genConformantKeyHash(name string, sigType, key []byte) (uint32, []byte) {
+ hash := sha256.New()
+ hash.Write([]byte(name))
+ hash.Write([]byte("\n"))
+ hash.Write(sigType)
+ hash.Write(key)
+ sum := hash.Sum(nil)
+ return binary.BigEndian.Uint32(sum), sum
+}
+
+// ed25519KeyHash generates the 4-byte key ID for an Ed25519 public key.
+// Ed25519 keys are the only key type compatible with witnessing.
+func ed25519KeyHash(name string, key []byte) (uint32, []byte) {
+ return genConformantKeyHash(name, []byte{algEd25519}, key)
+}
+
+// ecdsaKeyHash generates the 4-byte key ID for an ECDSA public key.
+// ECDSA key IDs do not conform to the note standard used for other key types
+// (see https://github.com/C2SP/C2SP/blob/8991f70ddf8a11de3a68d5a081e7be27e59d87c8/signed-note.md#signature-types).
+func ecdsaKeyHash(key *ecdsa.PublicKey) (uint32, []byte, error) {
+ marshaled, err := x509.MarshalPKIXPublicKey(key)
+ if err != nil {
+ return 0, nil, fmt.Errorf("marshaling public key: %w", err)
+ }
+ hash := sha256.Sum256(marshaled)
+ return binary.BigEndian.Uint32(hash[:]), hash[:], nil
+}
+
+// rsaKeyHash generates the 4-byte key ID for an RSA public key.
+func rsaKeyHash(name string, key *rsa.PublicKey) (uint32, []byte, error) {
+ marshaled, err := x509.MarshalPKIXPublicKey(key)
+ if err != nil {
+ return 0, nil, fmt.Errorf("marshaling public key: %w", err)
+ }
+ rsaAlg := append([]byte{algUndef}, []byte(rsaID)...)
+ id, hash := genConformantKeyHash(name, rsaAlg, marshaled)
+ return id, hash, nil
+}
+
+// KeyHash generates a truncated (4-byte) and non-truncated identifier for a
+// public key/origin pair.
+func KeyHash(origin string, key crypto.PublicKey) (uint32, []byte, error) {
+ var keyID uint32
+ var logID []byte
+ var err error
+
+ switch pk := key.(type) {
+ case *ecdsa.PublicKey:
+ keyID, logID, err = ecdsaKeyHash(pk)
+ if err != nil {
+ return 0, nil, fmt.Errorf("getting ECDSA key hash: %w", err)
+ }
+ case ed25519.PublicKey:
+ keyID, logID = ed25519KeyHash(origin, pk)
+ case *rsa.PublicKey:
+ keyID, logID, err = rsaKeyHash(origin, pk)
+ if err != nil {
+ return 0, nil, fmt.Errorf("getting RSA key hash: %w", err)
+ }
+ default:
+ return 0, nil, fmt.Errorf("unsupported key type: %T", key)
+ }
+
+ return keyID, logID, nil
+}
+
+// NewNoteSigner converts a sigstore/sigstore/pkg/signature.Signer into a note.Signer.
+func NewNoteSigner(ctx context.Context, origin string, signer signature.Signer) (note.Signer, error) {
+ if !isValidName(origin) {
+ return nil, fmt.Errorf("invalid name %s", origin)
+ }
+
+ pubKey, err := signer.PublicKey()
+ if err != nil {
+ return nil, fmt.Errorf("getting public key: %w", err)
+ }
+
+ keyID, _, err := KeyHash(origin, pubKey)
+ if err != nil {
+ return nil, err
+ }
+
+ sign := func(msg []byte) ([]byte, error) {
+ return signer.SignMessage(bytes.NewReader(msg), options.WithContext(ctx))
+ }
+
+ return &noteSigner{
+ name: origin,
+ hash: keyID,
+ sign: sign,
+ }, nil
+}
+
+// NewNoteVerifier converts a sigstore/sigstore/pkg/signature.Verifier into a note.Verifier.
+func NewNoteVerifier(origin string, verifier signature.Verifier) (note.Verifier, error) {
+ if !isValidName(origin) {
+ return nil, fmt.Errorf("invalid name %s", origin)
+ }
+
+ pubKey, err := verifier.PublicKey()
+ if err != nil {
+ return nil, fmt.Errorf("getting public key: %w", err)
+ }
+
+ keyID, _, err := KeyHash(origin, pubKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &noteVerifier{
+ name: origin,
+ hash: keyID,
+ verify: func(msg, sig []byte) bool {
+ if err := verifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
+ return false
+ }
+ return true
+ },
+ }, nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/types/verifier/verifier.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/types/verifier/verifier.go
new file mode 100644
index 00000000000..b080977229d
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/types/verifier/verifier.go
@@ -0,0 +1,41 @@
+// Copyright 2025 The Sigstore Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verifier
+
+import (
+ "fmt"
+
+ pb "github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf"
+)
+
+// Validate checks that there are no missing fields in a Verifier protobuf.
+func Validate(v *pb.Verifier) error {
+ publicKey := v.GetPublicKey()
+ x509Cert := v.GetX509Certificate()
+ if publicKey == nil && x509Cert == nil {
+ return fmt.Errorf("missing signature public key or X.509 certificate")
+ }
+ if publicKey != nil {
+ if len(publicKey.GetRawBytes()) == 0 {
+ return fmt.Errorf("missing public key raw bytes")
+ }
+ }
+ if x509Cert != nil {
+ if len(x509Cert.GetRawBytes()) == 0 {
+ return fmt.Errorf("missing X.509 certificate raw bytes")
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor-tiles/v2/pkg/verify/verify.go b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/verify/verify.go
new file mode 100644
index 00000000000..bb4fedf591c
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor-tiles/v2/pkg/verify/verify.go
@@ -0,0 +1,89 @@
+//
+// Copyright 2025 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "fmt"
+
+ pbs "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
+ "github.com/sigstore/rekor-tiles/v2/internal/safeint"
+ f_log "github.com/transparency-dev/formats/log"
+ "github.com/transparency-dev/merkle/proof"
+ "github.com/transparency-dev/merkle/rfc6962"
+ sumdb_note "golang.org/x/mod/sumdb/note"
+)
+
+// VerifyInclusionProof verifies an entry's inclusion proof
+func VerifyInclusionProof(entry *pbs.TransparencyLogEntry, cp *f_log.Checkpoint) error { //nolint: revive
+ leafHash := rfc6962.DefaultHasher.HashLeaf(entry.CanonicalizedBody)
+ index, err := safeint.NewSafeInt64(entry.LogIndex)
+ if err != nil {
+ return fmt.Errorf("invalid index: %w", err)
+ }
+ if err := proof.VerifyInclusion(rfc6962.DefaultHasher, index.U(), cp.Size, leafHash, entry.InclusionProof.Hashes, cp.Hash); err != nil {
+ return fmt.Errorf("verifying inclusion: %w", err)
+ }
+ return nil
+}
+
+// VerifyCheckpoint verifies the signature on the entry's inclusion proof checkpoint
+func VerifyCheckpoint(unverifiedCp string, verifier sumdb_note.Verifier) (*f_log.Checkpoint, error) { //nolint: revive
+ cp, _, _, err := f_log.ParseCheckpoint([]byte(unverifiedCp), verifier.Name(), verifier)
+ if err != nil {
+ return nil, fmt.Errorf("unverified checkpoint signature: %v", err)
+ }
+ return cp, nil
+}
+
+// VerifyWitnessedCheckpoint verifies the signature on the entry's inclusion proof checkpoint in addition to witness cosignatures.
+// This returns the underlying note, which contains all verified signatures.
+func VerifyWitnessedCheckpoint(unverifiedCp string, verifier sumdb_note.Verifier, otherVerifiers ...sumdb_note.Verifier) (*f_log.Checkpoint, *sumdb_note.Note, error) { //nolint: revive
+ cp, _, n, err := f_log.ParseCheckpoint([]byte(unverifiedCp), verifier.Name(), verifier, otherVerifiers...)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unverified checkpoint signature: %v", err)
+ }
+ return cp, n, nil
+}
+
+// VerifyLogEntry verifies the log entry. This includes verifying the signature on the entry's
+// inclusion proof checkpoint and verifying the entry inclusion proof
+func VerifyLogEntry(entry *pbs.TransparencyLogEntry, verifier sumdb_note.Verifier) error { //nolint: revive
+ cp, err := VerifyCheckpoint(entry.GetInclusionProof().GetCheckpoint().GetEnvelope(), verifier)
+ if err != nil {
+ return err
+ }
+ return VerifyInclusionProof(entry, cp)
+}
+
+// VerifyConsistencyProof verifies the latest checkpoint signature and the consistency proof between a previous log size
+// and root hash and the latest checkpoint's size and root hash. This may be used by a C2SP witness.
+func VerifyConsistencyProof(consistencyProof [][]byte, oldSize uint64, oldRootHash []byte, newUnverifiedCp string, verifier sumdb_note.Verifier) error { //nolint: revive
+ newCp, err := VerifyCheckpoint(newUnverifiedCp, verifier)
+ if err != nil {
+ return err
+ }
+ return proof.VerifyConsistency(rfc6962.DefaultHasher, oldSize, newCp.Size, consistencyProof, oldRootHash, newCp.Hash)
+}
+
+// VerifyConsistencyProofWithCheckpoints verifies previous and latest checkpoint signatures and the consistency proof
+// between these checkpoints. This may be used by a monitor that persists checkpoints.
+func VerifyConsistencyProofWithCheckpoints(consistencyProof [][]byte, oldUnverifiedCp, newUnverifiedCp string, verifier sumdb_note.Verifier) error { //nolint: revive
+ oldCp, err := VerifyCheckpoint(oldUnverifiedCp, verifier)
+ if err != nil {
+ return err
+ }
+ return VerifyConsistencyProof(consistencyProof, oldCp.Size, oldCp.Hash, newUnverifiedCp, verifier)
+}
diff --git a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md
new file mode 100644
index 00000000000..bdff02765c5
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md
@@ -0,0 +1,122 @@
+# Contributing
+
+When contributing to this repository, please first discuss the change you wish
+to make via an [issue](https://github.com/sigstore/rekor/issues).
+
+## Pull Request Process
+
+1. Create an [issue](https://github.com/sigstore/rekor/issues)
+ outlining the fix or feature.
+2. Fork the rekor repository to your own GitHub account and clone it locally.
+3. Hack on your changes.
+4. Update the README.md with details of changes to any interface; this includes new environment
+   variables, exposed ports, useful file locations, CLI parameters, and
+   new or changed configuration values.
+5. Correctly format your commit message; see [Commit Message Guidelines](#commit-message-guidelines)
+   below.
+6. Ensure that CI passes, if it fails, fix the failures.
+7. Every pull request requires a review from the [core rekor team](https://github.com/orgs/sigstore/teams/core-team)
+   before merging.
+8. If your pull request consists of more than one commit, please squash your
+   commits as described in [Squash Commits](#squash-commits).
+
+## Commit Message Guidelines
+
+We follow the commit formatting recommendations found in [Chris Beams' How to Write a Git Commit Message article](https://chris.beams.io/posts/git-commit/).
+
+Well-formed commit messages not only help reviewers understand the nature of
+the pull request, but also assist the release process, where commit messages
+are used to generate release notes.
+
+A good example of a commit message would be as follows:
+
+```
+Summarize changes in around 50 characters or less
+
+More detailed explanatory text, if necessary. Wrap it to about 72
+characters or so. In some contexts, the first line is treated as the
+subject of the commit and the rest of the text as the body. The
+blank line separating the summary from the body is critical (unless
+you omit the body entirely); various tools like `log`, `shortlog`
+and `rebase` can get confused if you run the two together.
+
+Explain the problem that this commit is solving. Focus on why you
+are making this change as opposed to how (the code explains that).
+Are there side effects or other unintuitive consequences of this
+change? Here's the place to explain them.
+
+Further paragraphs come after blank lines.
+
+ - Bullet points are okay, too
+
+ - Typically a hyphen or asterisk is used for the bullet, preceded
+ by a single space, with blank lines in between, but conventions
+ vary here
+
+If you use an issue tracker, put references to them at the bottom,
+like this:
+
+Resolves: #123
+See also: #456, #789
+```
+
+Note the `Resolves: #123` tag; this references the issue raised and allows us to
+ensure issues are associated and closed when a pull request is merged.
+
+Please refer to [the GitHub help page on closing issues using keywords](https://help.github.com/articles/closing-issues-using-keywords/)
+for a complete list of issue references.
+
+## Squash Commits
+
+Should your pull request consist of more than one commit (perhaps due to
+a change being requested during the review cycle), please perform a git squash
+once a reviewer has approved your pull request.
+
+A squash can be performed as follows. Let's say you have the following commits:
+
+ initial commit
+ second commit
+ final commit
+
+Run the command below with the number set to the total number of commits you wish to
+squash (in our case, 3 commits):
+
+ git rebase -i HEAD~3
+
+Your default text editor will then open, and you will see the following:
+
+ pick eb36612 initial commit
+ pick 9ac8968 second commit
+ pick a760569 final commit
+
+ # Rebase eb1429f..a760569 onto eb1429f (3 commands)
+
+We want to rebase on top of our first commit, so we change the other two commits
+to `squash`:
+
+ pick eb36612 initial commit
+ squash 9ac8968 second commit
+ squash a760569 final commit
+
+After this, should you wish to update your commit message to better summarise
+the whole of your pull request, run:
+
+ git commit --amend
+
+You will then need to force-push (assuming your initial commit(s) were pushed
+to GitHub):
+
+ git push origin your-branch --force
+
+Alternatively, a core member can squash your commits within GitHub.
+
+## DCO Signoff
+
+Make sure to sign the [Developer Certificate of
+Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff).
+
+## Code of Conduct
+
+Rekor adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct.
+Please take a moment to read the [CODE_OF_CONDUCT.md](https://github.com/sigstore/rekor/blob/master/CODE_OF_CONDUCT.md) document.
+
diff --git a/vendor/github.com/sigstore/rekor/COPYRIGHT.txt b/vendor/github.com/sigstore/rekor/COPYRIGHT.txt
new file mode 100644
index 00000000000..7a01c849864
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/COPYRIGHT.txt
@@ -0,0 +1,14 @@
+
+Copyright 2021 The Sigstore Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/sigstore/rekor/LICENSE b/vendor/github.com/sigstore/rekor/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_parameters.go
new file mode 100644
index 00000000000..481fa2bda5e
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_parameters.go
@@ -0,0 +1,164 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package entries
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// NewCreateLogEntryParams creates a new CreateLogEntryParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateLogEntryParams() *CreateLogEntryParams {
+ return &CreateLogEntryParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateLogEntryParamsWithTimeout creates a new CreateLogEntryParams object
+// with the ability to set a timeout on a request.
+func NewCreateLogEntryParamsWithTimeout(timeout time.Duration) *CreateLogEntryParams {
+ return &CreateLogEntryParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateLogEntryParamsWithContext creates a new CreateLogEntryParams object
+// with the ability to set a context for a request.
+func NewCreateLogEntryParamsWithContext(ctx context.Context) *CreateLogEntryParams {
+ return &CreateLogEntryParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateLogEntryParamsWithHTTPClient creates a new CreateLogEntryParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateLogEntryParamsWithHTTPClient(client *http.Client) *CreateLogEntryParams {
+ return &CreateLogEntryParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateLogEntryParams contains all the parameters to send to the API endpoint
+
+ for the create log entry operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateLogEntryParams struct {
+
+ // ProposedEntry.
+ ProposedEntry models.ProposedEntry
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create log entry params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateLogEntryParams) WithDefaults() *CreateLogEntryParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create log entry params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateLogEntryParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create log entry params
+func (o *CreateLogEntryParams) WithTimeout(timeout time.Duration) *CreateLogEntryParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create log entry params
+func (o *CreateLogEntryParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create log entry params
+func (o *CreateLogEntryParams) WithContext(ctx context.Context) *CreateLogEntryParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create log entry params
+func (o *CreateLogEntryParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create log entry params
+func (o *CreateLogEntryParams) WithHTTPClient(client *http.Client) *CreateLogEntryParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create log entry params
+func (o *CreateLogEntryParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithProposedEntry adds the proposedEntry to the create log entry params
+func (o *CreateLogEntryParams) WithProposedEntry(proposedEntry models.ProposedEntry) *CreateLogEntryParams {
+ o.SetProposedEntry(proposedEntry)
+ return o
+}
+
+// SetProposedEntry adds the proposedEntry to the create log entry params
+func (o *CreateLogEntryParams) SetProposedEntry(proposedEntry models.ProposedEntry) {
+ o.ProposedEntry = proposedEntry
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateLogEntryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.ProposedEntry); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go
new file mode 100644
index 00000000000..de665ed9cb5
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/create_log_entry_responses.go
@@ -0,0 +1,397 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package entries
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ stderrors "errors"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// CreateLogEntryReader is a Reader for the CreateLogEntry structure.
+type CreateLogEntryReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateLogEntryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) {
+ switch response.Code() {
+ case 201:
+ result := NewCreateLogEntryCreated()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewCreateLogEntryBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 409:
+ result := NewCreateLogEntryConflict()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ result := NewCreateLogEntryDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateLogEntryCreated creates a CreateLogEntryCreated with default headers values
+func NewCreateLogEntryCreated() *CreateLogEntryCreated {
+ return &CreateLogEntryCreated{}
+}
+
+/*
+CreateLogEntryCreated describes a response with status code 201, with default header values.
+
+Returns the entry created in the transparency log
+*/
+type CreateLogEntryCreated struct {
+
+ /* UUID of log entry
+ */
+ ETag string
+
+ /* URI location of log entry
+
+ Format: uri
+ */
+ Location strfmt.URI
+
+ Payload models.LogEntry
+}
+
+// IsSuccess returns true when this create log entry created response has a 2xx status code
+func (o *CreateLogEntryCreated) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create log entry created response has a 3xx status code
+func (o *CreateLogEntryCreated) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create log entry created response has a 4xx status code
+func (o *CreateLogEntryCreated) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create log entry created response has a 5xx status code
+func (o *CreateLogEntryCreated) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create log entry created response has a status code equal to that given
+func (o *CreateLogEntryCreated) IsCode(code int) bool {
+ return code == 201
+}
+
+// Code gets the status code for the create log entry created response
+func (o *CreateLogEntryCreated) Code() int {
+ return 201
+}
+
+func (o *CreateLogEntryCreated) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload)
+}
+
+func (o *CreateLogEntryCreated) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload)
+}
+
+func (o *CreateLogEntryCreated) GetPayload() models.LogEntry {
+ return o.Payload
+}
+
+func (o *CreateLogEntryCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // hydrates response header ETag
+ hdrETag := response.GetHeader("ETag")
+
+ if hdrETag != "" {
+ o.ETag = hdrETag
+ }
+
+ // hydrates response header Location
+ hdrLocation := response.GetHeader("Location")
+
+ if hdrLocation != "" {
+ vallocation, err := formats.Parse("uri", hdrLocation)
+ if err != nil {
+ return errors.InvalidType("Location", "header", "strfmt.URI", hdrLocation)
+ }
+ o.Location = *(vallocation.(*strfmt.URI))
+ }
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateLogEntryBadRequest creates a CreateLogEntryBadRequest with default headers values
+func NewCreateLogEntryBadRequest() *CreateLogEntryBadRequest {
+ return &CreateLogEntryBadRequest{}
+}
+
+/*
+CreateLogEntryBadRequest describes a response with status code 400, with default header values.
+
+The content supplied to the server was invalid
+*/
+type CreateLogEntryBadRequest struct {
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this create log entry bad request response has a 2xx status code
+func (o *CreateLogEntryBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this create log entry bad request response has a 3xx status code
+func (o *CreateLogEntryBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create log entry bad request response has a 4xx status code
+func (o *CreateLogEntryBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this create log entry bad request response has a 5xx status code
+func (o *CreateLogEntryBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create log entry bad request response has a status code equal to that given
+func (o *CreateLogEntryBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the create log entry bad request response
+func (o *CreateLogEntryBadRequest) Code() int {
+ return 400
+}
+
+func (o *CreateLogEntryBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload)
+}
+
+func (o *CreateLogEntryBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload)
+}
+
+func (o *CreateLogEntryBadRequest) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *CreateLogEntryBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateLogEntryConflict creates a CreateLogEntryConflict with default headers values
+func NewCreateLogEntryConflict() *CreateLogEntryConflict {
+ return &CreateLogEntryConflict{}
+}
+
+/*
+CreateLogEntryConflict describes a response with status code 409, with default header values.
+
+The request conflicts with the current state of the transparency log
+*/
+type CreateLogEntryConflict struct {
+ Location strfmt.URI
+
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this create log entry conflict response has a 2xx status code
+func (o *CreateLogEntryConflict) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this create log entry conflict response has a 3xx status code
+func (o *CreateLogEntryConflict) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create log entry conflict response has a 4xx status code
+func (o *CreateLogEntryConflict) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this create log entry conflict response has a 5xx status code
+func (o *CreateLogEntryConflict) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create log entry conflict response has a status code equal to that given
+func (o *CreateLogEntryConflict) IsCode(code int) bool {
+ return code == 409
+}
+
+// Code gets the status code for the create log entry conflict response
+func (o *CreateLogEntryConflict) Code() int {
+ return 409
+}
+
+func (o *CreateLogEntryConflict) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload)
+}
+
+func (o *CreateLogEntryConflict) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload)
+}
+
+func (o *CreateLogEntryConflict) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *CreateLogEntryConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // hydrates response header Location
+ hdrLocation := response.GetHeader("Location")
+
+ if hdrLocation != "" {
+ vallocation, err := formats.Parse("uri", hdrLocation)
+ if err != nil {
+ return errors.InvalidType("Location", "header", "strfmt.URI", hdrLocation)
+ }
+ o.Location = *(vallocation.(*strfmt.URI))
+ }
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateLogEntryDefault creates a CreateLogEntryDefault with default headers values
+func NewCreateLogEntryDefault(code int) *CreateLogEntryDefault {
+ return &CreateLogEntryDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateLogEntryDefault describes a response with status code -1, with default header values.
+
+There was an internal error in the server while processing the request
+*/
+type CreateLogEntryDefault struct {
+ _statusCode int
+
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this create log entry default response has a 2xx status code
+func (o *CreateLogEntryDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create log entry default response has a 3xx status code
+func (o *CreateLogEntryDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create log entry default response has a 4xx status code
+func (o *CreateLogEntryDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create log entry default response has a 5xx status code
+func (o *CreateLogEntryDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create log entry default response a status code equal to that given
+func (o *CreateLogEntryDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create log entry default response
+func (o *CreateLogEntryDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateLogEntryDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload)
+}
+
+func (o *CreateLogEntryDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload)
+}
+
+func (o *CreateLogEntryDefault) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *CreateLogEntryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go
new file mode 100644
index 00000000000..f893db3dc97
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go
@@ -0,0 +1,259 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package entries
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new entries API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new entries API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new entries API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for entries API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ CreateLogEntry(params *CreateLogEntryParams, opts ...ClientOption) (*CreateLogEntryCreated, error)
+
+ GetLogEntryByIndex(params *GetLogEntryByIndexParams, opts ...ClientOption) (*GetLogEntryByIndexOK, error)
+
+ GetLogEntryByUUID(params *GetLogEntryByUUIDParams, opts ...ClientOption) (*GetLogEntryByUUIDOK, error)
+
+ SearchLogQuery(params *SearchLogQueryParams, opts ...ClientOption) (*SearchLogQueryOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+CreateLogEntry creates an entry in the transparency log
+
+Creates an entry in the transparency log for a detached signature, public key, and content. Items can be included in the request or fetched by the server when URLs are specified.
+*/
+func (a *Client) CreateLogEntry(params *CreateLogEntryParams, opts ...ClientOption) (*CreateLogEntryCreated, error) {
+ // NOTE: parameters are not validated before sending
+ if params == nil {
+ params = NewCreateLogEntryParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "createLogEntry",
+ Method: "POST",
+ PathPattern: "/api/v1/log/entries",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateLogEntryReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+
+ // only one success response has to be checked
+ success, ok := result.(*CreateLogEntryCreated)
+ if ok {
+ return success, nil
+ }
+
+ // unexpected success response.
+ //
+ // a default response is provided: fill this and return an error
+ unexpectedSuccess := result.(*CreateLogEntryDefault)
+
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetLogEntryByIndex retrieves an entry and inclusion proof from the transparency log if it exists by index
+*/
+func (a *Client) GetLogEntryByIndex(params *GetLogEntryByIndexParams, opts ...ClientOption) (*GetLogEntryByIndexOK, error) {
+ // NOTE: parameters are not validated before sending
+ if params == nil {
+ params = NewGetLogEntryByIndexParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "getLogEntryByIndex",
+ Method: "GET",
+ PathPattern: "/api/v1/log/entries",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetLogEntryByIndexReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+
+ // only one success response has to be checked
+ success, ok := result.(*GetLogEntryByIndexOK)
+ if ok {
+ return success, nil
+ }
+
+ // unexpected success response.
+ //
+ // a default response is provided: fill this and return an error
+ unexpectedSuccess := result.(*GetLogEntryByIndexDefault)
+
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetLogEntryByUUID gets log entry and information required to generate an inclusion proof for the entry in the transparency log
+
+Returns the entry, root hash, tree size, and a list of hashes that can be used to calculate proof of an entry being included in the transparency log
+*/
+func (a *Client) GetLogEntryByUUID(params *GetLogEntryByUUIDParams, opts ...ClientOption) (*GetLogEntryByUUIDOK, error) {
+ // NOTE: parameters are not validated before sending
+ if params == nil {
+ params = NewGetLogEntryByUUIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "getLogEntryByUUID",
+ Method: "GET",
+ PathPattern: "/api/v1/log/entries/{entryUUID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetLogEntryByUUIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+
+ // only one success response has to be checked
+ success, ok := result.(*GetLogEntryByUUIDOK)
+ if ok {
+ return success, nil
+ }
+
+ // unexpected success response.
+ //
+ // a default response is provided: fill this and return an error
+ unexpectedSuccess := result.(*GetLogEntryByUUIDDefault)
+
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+SearchLogQuery searches transparency log for one or more log entries
+*/
+func (a *Client) SearchLogQuery(params *SearchLogQueryParams, opts ...ClientOption) (*SearchLogQueryOK, error) {
+ // NOTE: parameters are not validated before sending
+ if params == nil {
+ params = NewSearchLogQueryParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "searchLogQuery",
+ Method: "POST",
+ PathPattern: "/api/v1/log/entries/retrieve",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &SearchLogQueryReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+
+ // only one success response has to be checked
+ success, ok := result.(*SearchLogQueryOK)
+ if ok {
+ return success, nil
+ }
+
+ // unexpected success response.
+ //
+ // a default response is provided: fill this and return an error
+ unexpectedSuccess := result.(*SearchLogQueryDefault)
+
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_parameters.go
new file mode 100644
index 00000000000..e2252275119
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package entries
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetLogEntryByIndexParams creates a new GetLogEntryByIndexParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetLogEntryByIndexParams() *GetLogEntryByIndexParams {
+ return &GetLogEntryByIndexParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetLogEntryByIndexParamsWithTimeout creates a new GetLogEntryByIndexParams object
+// with the ability to set a timeout on a request.
+func NewGetLogEntryByIndexParamsWithTimeout(timeout time.Duration) *GetLogEntryByIndexParams {
+ return &GetLogEntryByIndexParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetLogEntryByIndexParamsWithContext creates a new GetLogEntryByIndexParams object
+// with the ability to set a context for a request.
+func NewGetLogEntryByIndexParamsWithContext(ctx context.Context) *GetLogEntryByIndexParams {
+ return &GetLogEntryByIndexParams{
+ Context: ctx,
+ }
+}
+
+// NewGetLogEntryByIndexParamsWithHTTPClient creates a new GetLogEntryByIndexParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetLogEntryByIndexParamsWithHTTPClient(client *http.Client) *GetLogEntryByIndexParams {
+ return &GetLogEntryByIndexParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetLogEntryByIndexParams contains all the parameters to send to the API endpoint
+
+ for the get log entry by index operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetLogEntryByIndexParams struct {
+
+ /* LogIndex.
+
+ specifies the index of the entry in the transparency log to be retrieved
+ */
+ LogIndex int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get log entry by index params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLogEntryByIndexParams) WithDefaults() *GetLogEntryByIndexParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get log entry by index params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLogEntryByIndexParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get log entry by index params
+func (o *GetLogEntryByIndexParams) WithTimeout(timeout time.Duration) *GetLogEntryByIndexParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get log entry by index params
+func (o *GetLogEntryByIndexParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get log entry by index params
+func (o *GetLogEntryByIndexParams) WithContext(ctx context.Context) *GetLogEntryByIndexParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get log entry by index params
+func (o *GetLogEntryByIndexParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get log entry by index params
+func (o *GetLogEntryByIndexParams) WithHTTPClient(client *http.Client) *GetLogEntryByIndexParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get log entry by index params
+func (o *GetLogEntryByIndexParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithLogIndex adds the logIndex to the get log entry by index params
+func (o *GetLogEntryByIndexParams) WithLogIndex(logIndex int64) *GetLogEntryByIndexParams {
+ o.SetLogIndex(logIndex)
+ return o
+}
+
+// SetLogIndex adds the logIndex to the get log entry by index params
+func (o *GetLogEntryByIndexParams) SetLogIndex(logIndex int64) {
+ o.LogIndex = logIndex
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetLogEntryByIndexParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // query param logIndex
+ qrLogIndex := o.LogIndex
+ qLogIndex := swag.FormatInt64(qrLogIndex)
+ if qLogIndex != "" {
+
+ if err := r.SetQueryParam("logIndex", qLogIndex); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
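+
+// Illustrative usage (a sketch, not part of the generated API): the fluent
+// builders above compose a params object for a single lookup. entriesClient
+// is assumed to be a generated entries.ClientService wired up elsewhere.
+//
+//	params := NewGetLogEntryByIndexParams().
+//		WithLogIndex(42).
+//		WithTimeout(30 * time.Second)
+//	resp, err := entriesClient.GetLogEntryByIndex(params)
+//	if err != nil {
+//		return err
+//	}
+//	entry := resp.GetPayload() // models.LogEntry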
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go
new file mode 100644
index 00000000000..40e17b3cfcd
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_index_responses.go
@@ -0,0 +1,264 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package entries
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ stderrors "errors"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// GetLogEntryByIndexReader is a Reader for the GetLogEntryByIndex structure.
+type GetLogEntryByIndexReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *GetLogEntryByIndexReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetLogEntryByIndexOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetLogEntryByIndexNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ result := NewGetLogEntryByIndexDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
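+
+// Illustrative caller-side sketch (not generated code): ReadResponse returns
+// the 200 result as the value and wraps 404 and default responses as errors,
+// so callers typically inspect the error with errors.As.
+//
+//	resp, err := entriesClient.GetLogEntryByIndex(params)
+//	if err != nil {
+//		var notFound *GetLogEntryByIndexNotFound
+//		if stderrors.As(err, &notFound) {
+//			// no entry exists at the requested index
+//		}
+//		return err
+//	}
+//	_ = resp.Payload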
+
+// NewGetLogEntryByIndexOK creates a GetLogEntryByIndexOK with default headers values
+func NewGetLogEntryByIndexOK() *GetLogEntryByIndexOK {
+ return &GetLogEntryByIndexOK{}
+}
+
+/*
+GetLogEntryByIndexOK describes a response with status code 200, with default header values.
+
+the entry in the transparency log requested along with an inclusion proof
+*/
+type GetLogEntryByIndexOK struct {
+ Payload models.LogEntry
+}
+
+// IsSuccess returns true when this get log entry by index OK response has a 2xx status code
+func (o *GetLogEntryByIndexOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get log entry by index OK response has a 3xx status code
+func (o *GetLogEntryByIndexOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get log entry by index OK response has a 4xx status code
+func (o *GetLogEntryByIndexOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get log entry by index OK response has a 5xx status code
+func (o *GetLogEntryByIndexOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get log entry by index OK response has a status code equal to that given
+func (o *GetLogEntryByIndexOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get log entry by index OK response
+func (o *GetLogEntryByIndexOK) Code() int {
+ return 200
+}
+
+func (o *GetLogEntryByIndexOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload)
+}
+
+func (o *GetLogEntryByIndexOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload)
+}
+
+func (o *GetLogEntryByIndexOK) GetPayload() models.LogEntry {
+ return o.Payload
+}
+
+func (o *GetLogEntryByIndexOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetLogEntryByIndexNotFound creates a GetLogEntryByIndexNotFound with default headers values
+func NewGetLogEntryByIndexNotFound() *GetLogEntryByIndexNotFound {
+ return &GetLogEntryByIndexNotFound{}
+}
+
+/*
+GetLogEntryByIndexNotFound describes a response with status code 404, with default header values.
+
+The content requested could not be found
+*/
+type GetLogEntryByIndexNotFound struct {
+}
+
+// IsSuccess returns true when this get log entry by index not found response has a 2xx status code
+func (o *GetLogEntryByIndexNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get log entry by index not found response has a 3xx status code
+func (o *GetLogEntryByIndexNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get log entry by index not found response has a 4xx status code
+func (o *GetLogEntryByIndexNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get log entry by index not found response has a 5xx status code
+func (o *GetLogEntryByIndexNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get log entry by index not found response has a status code equal to that given
+func (o *GetLogEntryByIndexNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+// Code gets the status code for the get log entry by index not found response
+func (o *GetLogEntryByIndexNotFound) Code() int {
+ return 404
+}
+
+func (o *GetLogEntryByIndexNotFound) Error() string {
+ return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404)
+}
+
+func (o *GetLogEntryByIndexNotFound) String() string {
+ return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404)
+}
+
+func (o *GetLogEntryByIndexNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetLogEntryByIndexDefault creates a GetLogEntryByIndexDefault with default headers values
+func NewGetLogEntryByIndexDefault(code int) *GetLogEntryByIndexDefault {
+ return &GetLogEntryByIndexDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetLogEntryByIndexDefault describes a response with status code -1, with default header values.
+
+There was an internal error in the server while processing the request
+*/
+type GetLogEntryByIndexDefault struct {
+ _statusCode int
+
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this get log entry by index default response has a 2xx status code
+func (o *GetLogEntryByIndexDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get log entry by index default response has a 3xx status code
+func (o *GetLogEntryByIndexDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get log entry by index default response has a 4xx status code
+func (o *GetLogEntryByIndexDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get log entry by index default response has a 5xx status code
+func (o *GetLogEntryByIndexDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get log entry by index default response has a status code equal to that given
+func (o *GetLogEntryByIndexDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get log entry by index default response
+func (o *GetLogEntryByIndexDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetLogEntryByIndexDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload)
+}
+
+func (o *GetLogEntryByIndexDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload)
+}
+
+func (o *GetLogEntryByIndexDefault) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *GetLogEntryByIndexDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_parameters.go
new file mode 100644
index 00000000000..5c88b526546
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_parameters.go
@@ -0,0 +1,167 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package entries
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetLogEntryByUUIDParams creates a new GetLogEntryByUUIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied on the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewGetLogEntryByUUIDParams() *GetLogEntryByUUIDParams {
+ return &GetLogEntryByUUIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetLogEntryByUUIDParamsWithTimeout creates a new GetLogEntryByUUIDParams object
+// with the ability to set a timeout on a request.
+func NewGetLogEntryByUUIDParamsWithTimeout(timeout time.Duration) *GetLogEntryByUUIDParams {
+ return &GetLogEntryByUUIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetLogEntryByUUIDParamsWithContext creates a new GetLogEntryByUUIDParams object
+// with the ability to set a context for a request.
+func NewGetLogEntryByUUIDParamsWithContext(ctx context.Context) *GetLogEntryByUUIDParams {
+ return &GetLogEntryByUUIDParams{
+ Context: ctx,
+ }
+}
+
+// NewGetLogEntryByUUIDParamsWithHTTPClient creates a new GetLogEntryByUUIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetLogEntryByUUIDParamsWithHTTPClient(client *http.Client) *GetLogEntryByUUIDParams {
+ return &GetLogEntryByUUIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetLogEntryByUUIDParams contains all the parameters to send to the API endpoint
+
+ for the get log entry by UUID operation.
+
+ Typically these are written to an http.Request.
+*/
+type GetLogEntryByUUIDParams struct {
+
+ /* EntryUUID.
+
+ the UUID of the entry for which the inclusion proof information should be returned
+ */
+ EntryUUID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get log entry by UUID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLogEntryByUUIDParams) WithDefaults() *GetLogEntryByUUIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get log entry by UUID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLogEntryByUUIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get log entry by UUID params
+func (o *GetLogEntryByUUIDParams) WithTimeout(timeout time.Duration) *GetLogEntryByUUIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get log entry by UUID params
+func (o *GetLogEntryByUUIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get log entry by UUID params
+func (o *GetLogEntryByUUIDParams) WithContext(ctx context.Context) *GetLogEntryByUUIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get log entry by UUID params
+func (o *GetLogEntryByUUIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get log entry by UUID params
+func (o *GetLogEntryByUUIDParams) WithHTTPClient(client *http.Client) *GetLogEntryByUUIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get log entry by UUID params
+func (o *GetLogEntryByUUIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEntryUUID adds the entryUUID to the get log entry by UUID params
+func (o *GetLogEntryByUUIDParams) WithEntryUUID(entryUUID string) *GetLogEntryByUUIDParams {
+ o.SetEntryUUID(entryUUID)
+ return o
+}
+
+// SetEntryUUID adds the entryUUID to the get log entry by UUID params
+func (o *GetLogEntryByUUIDParams) SetEntryUUID(entryUUID string) {
+ o.EntryUUID = entryUUID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetLogEntryByUUIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param entryUUID
+ if err := r.SetPathParam("entryUUID", o.EntryUUID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
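+
+// Illustrative sketch (entriesClient and the UUID value are placeholders):
+// the entryUUID set below is substituted into the {entryUUID} path segment by
+// WriteToRequest via SetPathParam.
+//
+//	params := NewGetLogEntryByUUIDParams().
+//		WithEntryUUID("24296fb2...")
+//	resp, err := entriesClient.GetLogEntryByUUID(params)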
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go
new file mode 100644
index 00000000000..3498e27287e
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/get_log_entry_by_uuid_responses.go
@@ -0,0 +1,264 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package entries
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ stderrors "errors"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// GetLogEntryByUUIDReader is a Reader for the GetLogEntryByUUID structure.
+type GetLogEntryByUUIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *GetLogEntryByUUIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetLogEntryByUUIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetLogEntryByUUIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ result := NewGetLogEntryByUUIDDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
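+
+// Illustrative caller-side sketch: the status-code helpers below let callers
+// branch on error class without matching exact codes. apiErr here wraps any
+// non-200/404 response.
+//
+//	resp, err := entriesClient.GetLogEntryByUUID(params)
+//	if err != nil {
+//		var apiErr *GetLogEntryByUUIDDefault
+//		if stderrors.As(err, &apiErr) && apiErr.IsServerError() {
+//			// a 5xx response: safe to retry with backoff
+//		}
+//		return err
+//	}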
+
+// NewGetLogEntryByUUIDOK creates a GetLogEntryByUUIDOK with default headers values
+func NewGetLogEntryByUUIDOK() *GetLogEntryByUUIDOK {
+ return &GetLogEntryByUUIDOK{}
+}
+
+/*
+GetLogEntryByUUIDOK describes a response with status code 200, with default header values.
+
+Information needed for a client to compute the inclusion proof
+*/
+type GetLogEntryByUUIDOK struct {
+ Payload models.LogEntry
+}
+
+// IsSuccess returns true when this get log entry by UUID OK response has a 2xx status code
+func (o *GetLogEntryByUUIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get log entry by UUID OK response has a 3xx status code
+func (o *GetLogEntryByUUIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get log entry by UUID OK response has a 4xx status code
+func (o *GetLogEntryByUUIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get log entry by UUID OK response has a 5xx status code
+func (o *GetLogEntryByUUIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get log entry by UUID OK response has a status code equal to that given
+func (o *GetLogEntryByUUIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get log entry by UUID OK response
+func (o *GetLogEntryByUUIDOK) Code() int {
+ return 200
+}
+
+func (o *GetLogEntryByUUIDOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload)
+}
+
+func (o *GetLogEntryByUUIDOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload)
+}
+
+func (o *GetLogEntryByUUIDOK) GetPayload() models.LogEntry {
+ return o.Payload
+}
+
+func (o *GetLogEntryByUUIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetLogEntryByUUIDNotFound creates a GetLogEntryByUUIDNotFound with default headers values
+func NewGetLogEntryByUUIDNotFound() *GetLogEntryByUUIDNotFound {
+ return &GetLogEntryByUUIDNotFound{}
+}
+
+/*
+GetLogEntryByUUIDNotFound describes a response with status code 404, with default header values.
+
+The content requested could not be found
+*/
+type GetLogEntryByUUIDNotFound struct {
+}
+
+// IsSuccess returns true when this get log entry by UUID not found response has a 2xx status code
+func (o *GetLogEntryByUUIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get log entry by UUID not found response has a 3xx status code
+func (o *GetLogEntryByUUIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get log entry by UUID not found response has a 4xx status code
+func (o *GetLogEntryByUUIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get log entry by UUID not found response has a 5xx status code
+func (o *GetLogEntryByUUIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get log entry by UUID not found response has a status code equal to that given
+func (o *GetLogEntryByUUIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+// Code gets the status code for the get log entry by UUID not found response
+func (o *GetLogEntryByUUIDNotFound) Code() int {
+ return 404
+}
+
+func (o *GetLogEntryByUUIDNotFound) Error() string {
+ return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404)
+}
+
+func (o *GetLogEntryByUUIDNotFound) String() string {
+ return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404)
+}
+
+func (o *GetLogEntryByUUIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetLogEntryByUUIDDefault creates a GetLogEntryByUUIDDefault with default headers values
+func NewGetLogEntryByUUIDDefault(code int) *GetLogEntryByUUIDDefault {
+ return &GetLogEntryByUUIDDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetLogEntryByUUIDDefault describes a response with status code -1, with default header values.
+
+There was an internal error in the server while processing the request
+*/
+type GetLogEntryByUUIDDefault struct {
+ _statusCode int
+
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this get log entry by UUID default response has a 2xx status code
+func (o *GetLogEntryByUUIDDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get log entry by UUID default response has a 3xx status code
+func (o *GetLogEntryByUUIDDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get log entry by UUID default response has a 4xx status code
+func (o *GetLogEntryByUUIDDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get log entry by UUID default response has a 5xx status code
+func (o *GetLogEntryByUUIDDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get log entry by UUID default response has a status code equal to that given
+func (o *GetLogEntryByUUIDDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get log entry by UUID default response
+func (o *GetLogEntryByUUIDDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetLogEntryByUUIDDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload)
+}
+
+func (o *GetLogEntryByUUIDDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload)
+}
+
+func (o *GetLogEntryByUUIDDefault) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *GetLogEntryByUUIDDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_parameters.go
new file mode 100644
index 00000000000..ed158ce23e3
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_parameters.go
@@ -0,0 +1,166 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package entries
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// NewSearchLogQueryParams creates a new SearchLogQueryParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied on the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewSearchLogQueryParams() *SearchLogQueryParams {
+ return &SearchLogQueryParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewSearchLogQueryParamsWithTimeout creates a new SearchLogQueryParams object
+// with the ability to set a timeout on a request.
+func NewSearchLogQueryParamsWithTimeout(timeout time.Duration) *SearchLogQueryParams {
+ return &SearchLogQueryParams{
+ timeout: timeout,
+ }
+}
+
+// NewSearchLogQueryParamsWithContext creates a new SearchLogQueryParams object
+// with the ability to set a context for a request.
+func NewSearchLogQueryParamsWithContext(ctx context.Context) *SearchLogQueryParams {
+ return &SearchLogQueryParams{
+ Context: ctx,
+ }
+}
+
+// NewSearchLogQueryParamsWithHTTPClient creates a new SearchLogQueryParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewSearchLogQueryParamsWithHTTPClient(client *http.Client) *SearchLogQueryParams {
+ return &SearchLogQueryParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+SearchLogQueryParams contains all the parameters to send to the API endpoint
+
+ for the search log query operation.
+
+ Typically these are written to an http.Request.
+*/
+type SearchLogQueryParams struct {
+
+ // Entry.
+ Entry *models.SearchLogQuery
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the search log query params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *SearchLogQueryParams) WithDefaults() *SearchLogQueryParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the search log query params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *SearchLogQueryParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the search log query params
+func (o *SearchLogQueryParams) WithTimeout(timeout time.Duration) *SearchLogQueryParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the search log query params
+func (o *SearchLogQueryParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the search log query params
+func (o *SearchLogQueryParams) WithContext(ctx context.Context) *SearchLogQueryParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the search log query params
+func (o *SearchLogQueryParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the search log query params
+func (o *SearchLogQueryParams) WithHTTPClient(client *http.Client) *SearchLogQueryParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the search log query params
+func (o *SearchLogQueryParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEntry adds the entry to the search log query params
+func (o *SearchLogQueryParams) WithEntry(entry *models.SearchLogQuery) *SearchLogQueryParams {
+ o.SetEntry(entry)
+ return o
+}
+
+// SetEntry adds the entry to the search log query params
+func (o *SearchLogQueryParams) SetEntry(entry *models.SearchLogQuery) {
+ o.Entry = entry
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *SearchLogQueryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Entry != nil {
+ if err := r.SetBodyParam(o.Entry); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
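+
+// Illustrative sketch (field names are taken from the vendored rekor models
+// package): the Entry body set below is JSON-encoded into the POST body by
+// SetBodyParam in WriteToRequest.
+//
+//	logIndex := int64(42)
+//	query := &models.SearchLogQuery{
+//		LogIndexes: []*int64{&logIndex},
+//	}
+//	params := NewSearchLogQueryParams().WithEntry(query)
+//	resp, err := entriesClient.SearchLogQuery(params)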
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go
new file mode 100644
index 00000000000..13d5ba27864
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/search_log_query_responses.go
@@ -0,0 +1,354 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package entries
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ stderrors "errors"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// SearchLogQueryReader is a Reader for the SearchLogQuery structure.
+type SearchLogQueryReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *SearchLogQueryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) {
+ switch response.Code() {
+ case 200:
+ result := NewSearchLogQueryOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewSearchLogQueryBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 422:
+ result := NewSearchLogQueryUnprocessableEntity()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ result := NewSearchLogQueryDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
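+
+// Illustrative caller-side sketch: 400 and 422 surface as distinct error
+// types, separating malformed queries from well-formed but unprocessable ones.
+//
+//	resp, err := entriesClient.SearchLogQuery(params)
+//	switch err.(type) {
+//	case nil:
+//		_ = resp.Payload
+//	case *SearchLogQueryBadRequest:
+//		// the query was malformed
+//	case *SearchLogQueryUnprocessableEntity:
+//		// the query was valid but could not be processed
+//	}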
+
+// NewSearchLogQueryOK creates a SearchLogQueryOK with default headers values
+func NewSearchLogQueryOK() *SearchLogQueryOK {
+ return &SearchLogQueryOK{}
+}
+
+/*
+SearchLogQueryOK describes a response with status code 200, with default header values.
+
+Returns zero or more entries from the transparency log, according to how many were included in request query
+*/
+type SearchLogQueryOK struct {
+ Payload []models.LogEntry
+}
+
+// IsSuccess returns true when this search log query OK response has a 2xx status code
+func (o *SearchLogQueryOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this search log query OK response has a 3xx status code
+func (o *SearchLogQueryOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this search log query OK response has a 4xx status code
+func (o *SearchLogQueryOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this search log query OK response has a 5xx status code
+func (o *SearchLogQueryOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this search log query OK response has a status code equal to that given
+func (o *SearchLogQueryOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the search log query OK response
+func (o *SearchLogQueryOK) Code() int {
+ return 200
+}
+
+func (o *SearchLogQueryOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload)
+}
+
+func (o *SearchLogQueryOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload)
+}
+
+func (o *SearchLogQueryOK) GetPayload() []models.LogEntry {
+ return o.Payload
+}
+
+func (o *SearchLogQueryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewSearchLogQueryBadRequest creates a SearchLogQueryBadRequest with default headers values
+func NewSearchLogQueryBadRequest() *SearchLogQueryBadRequest {
+ return &SearchLogQueryBadRequest{}
+}
+
+/*
+SearchLogQueryBadRequest describes a response with status code 400, with default header values.
+
+The content supplied to the server was invalid
+*/
+type SearchLogQueryBadRequest struct {
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this search log query bad request response has a 2xx status code
+func (o *SearchLogQueryBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this search log query bad request response has a 3xx status code
+func (o *SearchLogQueryBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this search log query bad request response has a 4xx status code
+func (o *SearchLogQueryBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this search log query bad request response has a 5xx status code
+func (o *SearchLogQueryBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this search log query bad request response has a status code equal to that given
+func (o *SearchLogQueryBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the search log query bad request response
+func (o *SearchLogQueryBadRequest) Code() int {
+ return 400
+}
+
+func (o *SearchLogQueryBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload)
+}
+
+func (o *SearchLogQueryBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload)
+}
+
+func (o *SearchLogQueryBadRequest) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *SearchLogQueryBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewSearchLogQueryUnprocessableEntity creates a SearchLogQueryUnprocessableEntity with default headers values
+func NewSearchLogQueryUnprocessableEntity() *SearchLogQueryUnprocessableEntity {
+ return &SearchLogQueryUnprocessableEntity{}
+}
+
+/*
+SearchLogQueryUnprocessableEntity describes a response with status code 422, with default header values.
+
+The server understood the request but is unable to process the contained instructions
+*/
+type SearchLogQueryUnprocessableEntity struct {
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this search log query unprocessable entity response has a 2xx status code
+func (o *SearchLogQueryUnprocessableEntity) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this search log query unprocessable entity response has a 3xx status code
+func (o *SearchLogQueryUnprocessableEntity) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this search log query unprocessable entity response has a 4xx status code
+func (o *SearchLogQueryUnprocessableEntity) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this search log query unprocessable entity response has a 5xx status code
+func (o *SearchLogQueryUnprocessableEntity) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this search log query unprocessable entity response has a status code equal to that given
+func (o *SearchLogQueryUnprocessableEntity) IsCode(code int) bool {
+ return code == 422
+}
+
+// Code gets the status code for the search log query unprocessable entity response
+func (o *SearchLogQueryUnprocessableEntity) Code() int {
+ return 422
+}
+
+func (o *SearchLogQueryUnprocessableEntity) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload)
+}
+
+func (o *SearchLogQueryUnprocessableEntity) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload)
+}
+
+func (o *SearchLogQueryUnprocessableEntity) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *SearchLogQueryUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewSearchLogQueryDefault creates a SearchLogQueryDefault with default headers values
+func NewSearchLogQueryDefault(code int) *SearchLogQueryDefault {
+ return &SearchLogQueryDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+SearchLogQueryDefault describes a response with status code -1, with default header values.
+
+There was an internal error in the server while processing the request
+*/
+type SearchLogQueryDefault struct {
+ _statusCode int
+
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this search log query default response has a 2xx status code
+func (o *SearchLogQueryDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this search log query default response has a 3xx status code
+func (o *SearchLogQueryDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this search log query default response has a 4xx status code
+func (o *SearchLogQueryDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this search log query default response has a 5xx status code
+func (o *SearchLogQueryDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this search log query default response has a status code equal to that given
+func (o *SearchLogQueryDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the search log query default response
+func (o *SearchLogQueryDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *SearchLogQueryDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload)
+}
+
+func (o *SearchLogQueryDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload)
+}
+
+func (o *SearchLogQueryDefault) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *SearchLogQueryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go
new file mode 100644
index 00000000000..80db490317c
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go
@@ -0,0 +1,127 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package index
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new index API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new index API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new index API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
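+
+// Illustrative sketch (host and token are placeholders): either constructor
+// returns a ClientService whose requests carry the configured credentials.
+//
+//	indexClient := NewClientWithBearerToken(
+//		"rekor.example.com", "/", "https", "api-token")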
+
+/*
+Client for index API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ SearchIndex(params *SearchIndexParams, opts ...ClientOption) (*SearchIndexOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+ SearchIndex searches index by entry metadata
+
+ EXPERIMENTAL - this endpoint is offered as best effort only and may be changed or removed in future releases.
+
+The results returned from this endpoint may be incomplete.
+*/
+func (a *Client) SearchIndex(params *SearchIndexParams, opts ...ClientOption) (*SearchIndexOK, error) {
+ // NOTE: parameters are not validated before sending
+ if params == nil {
+ params = NewSearchIndexParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "searchIndex",
+ Method: "POST",
+ PathPattern: "/api/v1/index/retrieve",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &SearchIndexReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+
+ // only one success response has to be checked
+ success, ok := result.(*SearchIndexOK)
+ if ok {
+ return success, nil
+ }
+
+ // unexpected success response.
+ //
+ // a default response is provided: fill this and return an error
+ unexpectedSuccess := result.(*SearchIndexDefault)
+
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
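+
+// Illustrative sketch: a ClientOption mutates the runtime.ClientOperation
+// before it is submitted, e.g. to force HTTPS instead of the generated
+// default scheme.
+//
+//	withHTTPS := func(op *runtime.ClientOperation) {
+//		op.Schemes = []string{"https"}
+//	}
+//	ok, err := indexClient.SearchIndex(params, withHTTPS)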
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_parameters.go
new file mode 100644
index 00000000000..c1694193ef9
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_parameters.go
@@ -0,0 +1,166 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package index
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// NewSearchIndexParams creates a new SearchIndexParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied on the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewSearchIndexParams() *SearchIndexParams {
+ return &SearchIndexParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewSearchIndexParamsWithTimeout creates a new SearchIndexParams object
+// with the ability to set a timeout on a request.
+func NewSearchIndexParamsWithTimeout(timeout time.Duration) *SearchIndexParams {
+ return &SearchIndexParams{
+ timeout: timeout,
+ }
+}
+
+// NewSearchIndexParamsWithContext creates a new SearchIndexParams object
+// with the ability to set a context for a request.
+func NewSearchIndexParamsWithContext(ctx context.Context) *SearchIndexParams {
+ return &SearchIndexParams{
+ Context: ctx,
+ }
+}
+
+// NewSearchIndexParamsWithHTTPClient creates a new SearchIndexParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewSearchIndexParamsWithHTTPClient(client *http.Client) *SearchIndexParams {
+ return &SearchIndexParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+SearchIndexParams contains all the parameters to send to the API endpoint
+
+ for the search index operation.
+
+ Typically these are written to an http.Request.
+*/
+type SearchIndexParams struct {
+
+ // Query.
+ Query *models.SearchIndex
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the search index params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *SearchIndexParams) WithDefaults() *SearchIndexParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the search index params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *SearchIndexParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the search index params
+func (o *SearchIndexParams) WithTimeout(timeout time.Duration) *SearchIndexParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the search index params
+func (o *SearchIndexParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the search index params
+func (o *SearchIndexParams) WithContext(ctx context.Context) *SearchIndexParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the search index params
+func (o *SearchIndexParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the search index params
+func (o *SearchIndexParams) WithHTTPClient(client *http.Client) *SearchIndexParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the search index params
+func (o *SearchIndexParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithQuery adds the query to the search index params
+func (o *SearchIndexParams) WithQuery(query *models.SearchIndex) *SearchIndexParams {
+ o.SetQuery(query)
+ return o
+}
+
+// SetQuery adds the query to the search index params
+func (o *SearchIndexParams) SetQuery(query *models.SearchIndex) {
+ o.Query = query
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *SearchIndexParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Query != nil {
+ if err := r.SetBodyParam(o.Query); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
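+
+// Illustrative sketch (the Email field is taken from the vendored rekor
+// models package): Query is posted as the JSON request body.
+//
+//	params := NewSearchIndexParams().WithQuery(&models.SearchIndex{
+//		Email: "signer@example.com",
+//	})
+//	ok, err := indexClient.SearchIndex(params)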
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go
new file mode 100644
index 00000000000..8c62eca17b1
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go
@@ -0,0 +1,278 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package index
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ stderrors "errors"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// SearchIndexReader is a Reader for the SearchIndex structure.
+type SearchIndexReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *SearchIndexReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) {
+ switch response.Code() {
+ case 200:
+ result := NewSearchIndexOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewSearchIndexBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ result := NewSearchIndexDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
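+
+// Illustrative caller-side sketch: a successful search yields zero or more
+// entry UUIDs, which can then be resolved with the entries client.
+//
+//	ok, err := indexClient.SearchIndex(params)
+//	if err != nil {
+//		return err
+//	}
+//	for _, uuid := range ok.GetPayload() {
+//		fmt.Println(uuid)
+//	}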
+
+// NewSearchIndexOK creates a SearchIndexOK with default headers values
+func NewSearchIndexOK() *SearchIndexOK {
+ return &SearchIndexOK{}
+}
+
+/*
+SearchIndexOK describes a response with status code 200, with default header values.
+
+Returns zero or more entry UUIDs from the transparency log based on search query
+*/
+type SearchIndexOK struct {
+ Payload []string
+}
+
+// IsSuccess returns true when this search index OK response has a 2xx status code
+func (o *SearchIndexOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this search index OK response has a 3xx status code
+func (o *SearchIndexOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this search index OK response has a 4xx status code
+func (o *SearchIndexOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this search index OK response has a 5xx status code
+func (o *SearchIndexOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this search index OK response has a status code equal to that given
+func (o *SearchIndexOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the search index OK response
+func (o *SearchIndexOK) Code() int {
+ return 200
+}
+
+func (o *SearchIndexOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload)
+}
+
+func (o *SearchIndexOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload)
+}
+
+func (o *SearchIndexOK) GetPayload() []string {
+ return o.Payload
+}
+
+func (o *SearchIndexOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewSearchIndexBadRequest creates a SearchIndexBadRequest with default headers values
+func NewSearchIndexBadRequest() *SearchIndexBadRequest {
+ return &SearchIndexBadRequest{}
+}
+
+/*
+SearchIndexBadRequest describes a response with status code 400, with default header values.
+
+The content supplied to the server was invalid
+*/
+type SearchIndexBadRequest struct {
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this search index bad request response has a 2xx status code
+func (o *SearchIndexBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this search index bad request response has a 3xx status code
+func (o *SearchIndexBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this search index bad request response has a 4xx status code
+func (o *SearchIndexBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this search index bad request response has a 5xx status code
+func (o *SearchIndexBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this search index bad request response has a status code equal to that given
+func (o *SearchIndexBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the search index bad request response
+func (o *SearchIndexBadRequest) Code() int {
+ return 400
+}
+
+func (o *SearchIndexBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload)
+}
+
+func (o *SearchIndexBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload)
+}
+
+func (o *SearchIndexBadRequest) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *SearchIndexBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewSearchIndexDefault creates a SearchIndexDefault with default headers values
+func NewSearchIndexDefault(code int) *SearchIndexDefault {
+ return &SearchIndexDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+SearchIndexDefault describes a response with status code -1, with default header values.
+
+There was an internal error in the server while processing the request
+*/
+type SearchIndexDefault struct {
+ _statusCode int
+
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this search index default response has a 2xx status code
+func (o *SearchIndexDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this search index default response has a 3xx status code
+func (o *SearchIndexDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this search index default response has a 4xx status code
+func (o *SearchIndexDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this search index default response has a 5xx status code
+func (o *SearchIndexDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this search index default response has a status code equal to that given
+func (o *SearchIndexDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the search index default response
+func (o *SearchIndexDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *SearchIndexDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload)
+}
+
+func (o *SearchIndexDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload)
+}
+
+func (o *SearchIndexDefault) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *SearchIndexDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
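The reader above turns each status code into a typed value: a 200 yields `SearchIndexOK` carrying the matching entry UUIDs, a 400 yields `SearchIndexBadRequest`, and everything else falls through to `SearchIndexDefault`. The non-2xx types double as `error` values, so a caller can recover the typed payload with `errors.As`. A minimal usage sketch, assuming the companion `SearchIndexParams` builder and the `Index.SearchIndex` method generated in the same package (they sit outside this hunk), with a hypothetical artifact digest:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/sigstore/rekor/pkg/generated/client"
	"github.com/sigstore/rekor/pkg/generated/client/index"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	rekorClient := client.Default

	// Hypothetical query: look up log entries by artifact digest.
	query := &models.SearchIndex{
		Hash: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	}

	resp, err := rekorClient.Index.SearchIndex(index.NewSearchIndexParams().WithQuery(query))
	if err != nil {
		// A 400 is returned as a typed error with the server's message attached.
		var badRequest *index.SearchIndexBadRequest
		if errors.As(err, &badRequest) {
			log.Fatalf("invalid search query: %v", badRequest.GetPayload())
		}
		log.Fatal(err)
	}
	for _, uuid := range resp.GetPayload() {
		fmt.Println(uuid)
	}
}
```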
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go
new file mode 100644
index 00000000000..b4248c933a7
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package pubkey
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetPublicKeyParams creates a new GetPublicKeyParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetPublicKeyParams() *GetPublicKeyParams {
+ return &GetPublicKeyParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetPublicKeyParamsWithTimeout creates a new GetPublicKeyParams object
+// with the ability to set a timeout on a request.
+func NewGetPublicKeyParamsWithTimeout(timeout time.Duration) *GetPublicKeyParams {
+ return &GetPublicKeyParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetPublicKeyParamsWithContext creates a new GetPublicKeyParams object
+// with the ability to set a context for a request.
+func NewGetPublicKeyParamsWithContext(ctx context.Context) *GetPublicKeyParams {
+ return &GetPublicKeyParams{
+ Context: ctx,
+ }
+}
+
+// NewGetPublicKeyParamsWithHTTPClient creates a new GetPublicKeyParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetPublicKeyParamsWithHTTPClient(client *http.Client) *GetPublicKeyParams {
+ return &GetPublicKeyParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetPublicKeyParams contains all the parameters to send to the API endpoint
+
+ for the get public key operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetPublicKeyParams struct {
+
+ /* TreeID.
+
+ The tree ID of the tree you wish to get a public key for
+ */
+ TreeID *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get public key params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPublicKeyParams) WithDefaults() *GetPublicKeyParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get public key params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPublicKeyParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get public key params
+func (o *GetPublicKeyParams) WithTimeout(timeout time.Duration) *GetPublicKeyParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get public key params
+func (o *GetPublicKeyParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get public key params
+func (o *GetPublicKeyParams) WithContext(ctx context.Context) *GetPublicKeyParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get public key params
+func (o *GetPublicKeyParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get public key params
+func (o *GetPublicKeyParams) WithHTTPClient(client *http.Client) *GetPublicKeyParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get public key params
+func (o *GetPublicKeyParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithTreeID adds the treeID to the get public key params
+func (o *GetPublicKeyParams) WithTreeID(treeID *string) *GetPublicKeyParams {
+ o.SetTreeID(treeID)
+ return o
+}
+
+// SetTreeID adds the treeId to the get public key params
+func (o *GetPublicKeyParams) SetTreeID(treeID *string) {
+ o.TreeID = treeID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetPublicKeyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.TreeID != nil {
+
+ // query param treeID
+ var qrTreeID string
+
+ if o.TreeID != nil {
+ qrTreeID = *o.TreeID
+ }
+ qTreeID := qrTreeID
+ if qTreeID != "" {
+
+ if err := r.SetQueryParam("treeID", qTreeID); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
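`TreeID` is the endpoint's only parameter, and it is optional: a nil pointer (or empty string) omits the query parameter entirely, in which case the server presumably answers for its current active tree. A small sketch with a hypothetical shard ID:

```go
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/client/pubkey"
)

func main() {
	treeID := "1193050959916656506" // hypothetical tree (shard) ID
	params := pubkey.NewGetPublicKeyParams().WithTreeID(&treeID)
	fmt.Println(*params.TreeID)
}
```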
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go
new file mode 100644
index 00000000000..70dbc452a3b
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go
@@ -0,0 +1,202 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package pubkey
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ stderrors "errors"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// GetPublicKeyReader is a Reader for the GetPublicKey structure.
+type GetPublicKeyReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *GetPublicKeyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetPublicKeyOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetPublicKeyDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetPublicKeyOK creates a GetPublicKeyOK with default headers values
+func NewGetPublicKeyOK() *GetPublicKeyOK {
+ return &GetPublicKeyOK{}
+}
+
+/*
+GetPublicKeyOK describes a response with status code 200, with default header values.
+
+The public key
+*/
+type GetPublicKeyOK struct {
+ Payload string
+}
+
+// IsSuccess returns true when this get public key OK response has a 2xx status code
+func (o *GetPublicKeyOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get public key OK response has a 3xx status code
+func (o *GetPublicKeyOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get public key OK response has a 4xx status code
+func (o *GetPublicKeyOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get public key OK response has a 5xx status code
+func (o *GetPublicKeyOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get public key OK response has a status code equal to that given
+func (o *GetPublicKeyOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get public key OK response
+func (o *GetPublicKeyOK) Code() int {
+ return 200
+}
+
+func (o *GetPublicKeyOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload)
+}
+
+func (o *GetPublicKeyOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload)
+}
+
+func (o *GetPublicKeyOK) GetPayload() string {
+ return o.Payload
+}
+
+func (o *GetPublicKeyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetPublicKeyDefault creates a GetPublicKeyDefault with default headers values
+func NewGetPublicKeyDefault(code int) *GetPublicKeyDefault {
+ return &GetPublicKeyDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetPublicKeyDefault describes a response with status code -1, with default header values.
+
+There was an internal error in the server while processing the request
+*/
+type GetPublicKeyDefault struct {
+ _statusCode int
+
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this get public key default response has a 2xx status code
+func (o *GetPublicKeyDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get public key default response has a 3xx status code
+func (o *GetPublicKeyDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get public key default response has a 4xx status code
+func (o *GetPublicKeyDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get public key default response has a 5xx status code
+func (o *GetPublicKeyDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get public key default response has a status code equal to that given
+func (o *GetPublicKeyDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get public key default response
+func (o *GetPublicKeyDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetPublicKeyDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload)
+}
+
+func (o *GetPublicKeyDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload)
+}
+
+func (o *GetPublicKeyDefault) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *GetPublicKeyDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go
new file mode 100644
index 00000000000..7f4db7d8cf1
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go
@@ -0,0 +1,149 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package pubkey
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new pubkey API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new pubkey API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (e.g. rekor.sigstore.dev).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new pubkey API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (e.g. rekor.sigstore.dev).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for pubkey API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// This client is generated with a few options you might find useful for your swagger spec.
+//
+// Feel free to add your own set of options.
+
+// WithAccept allows the client to force the Accept header
+// to negotiate a specific Producer from the server.
+//
+// You may use this option to set arbitrary extensions to your MIME media type.
+func WithAccept(mime string) ClientOption {
+ return func(r *runtime.ClientOperation) {
+ r.ProducesMediaTypes = []string{mime}
+ }
+}
+
+// WithAcceptApplicationJSON sets the Accept header to "application/json".
+func WithAcceptApplicationJSON(r *runtime.ClientOperation) {
+ r.ProducesMediaTypes = []string{"application/json"}
+}
+
+// WithAcceptApplicationxPemFile sets the Accept header to "application/x-pem-file".
+func WithAcceptApplicationxPemFile(r *runtime.ClientOperation) {
+ r.ProducesMediaTypes = []string{"application/x-pem-file"}
+}
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) (*GetPublicKeyOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+GetPublicKey retrieves the public key that can be used to validate the signed tree head
+
+Returns the public key that can be used to validate the signed tree head
+*/
+func (a *Client) GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) (*GetPublicKeyOK, error) {
+ // NOTE: parameters are not validated before sending
+ if params == nil {
+ params = NewGetPublicKeyParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "getPublicKey",
+ Method: "GET",
+ PathPattern: "/api/v1/log/publicKey",
+ ProducesMediaTypes: []string{"application/x-pem-file"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetPublicKeyReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+
+ // only one success response has to be checked
+ success, ok := result.(*GetPublicKeyOK)
+ if ok {
+ return success, nil
+ }
+
+ // unexpected success response.
+ //
+ // a default response is provided: fill this and return an error
+ unexpectedSuccess := result.(*GetPublicKeyDefault)
+
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
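Putting the pieces together: the operation produces `application/x-pem-file` by default, and the `ClientOption` helpers above can renegotiate that per call. A minimal usage sketch against the package-level default client:

```go
package main

import (
	"fmt"
	"log"

	"github.com/sigstore/rekor/pkg/generated/client"
	"github.com/sigstore/rekor/pkg/generated/client/pubkey"
)

func main() {
	rekorClient := client.Default // rekor.sigstore.dev with the spec defaults

	resp, err := rekorClient.Pubkey.GetPublicKey(
		pubkey.NewGetPublicKeyParams(),
		pubkey.WithAcceptApplicationxPemFile, // the operation default, made explicit
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.GetPayload()) // PEM-encoded key for validating the signed tree head
}
```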
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go
new file mode 100644
index 00000000000..bee3811184a
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go
@@ -0,0 +1,143 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package client
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/client/entries"
+ "github.com/sigstore/rekor/pkg/generated/client/index"
+ "github.com/sigstore/rekor/pkg/generated/client/pubkey"
+ "github.com/sigstore/rekor/pkg/generated/client/tlog"
+)
+
+// Default rekor HTTP client.
+var Default = NewHTTPClient(nil)
+
+const (
+ // DefaultHost is the default Host
+ // found in Meta (info) section of spec file
+ DefaultHost string = "rekor.sigstore.dev"
+ // DefaultBasePath is the default BasePath
+ // found in Meta (info) section of spec file
+ DefaultBasePath string = "/"
+)
+
+// DefaultSchemes are the default schemes found in Meta (info) section of spec file
+var DefaultSchemes = []string{"http"}
+
+// NewHTTPClient creates a new rekor HTTP client.
+func NewHTTPClient(formats strfmt.Registry) *Rekor {
+ return NewHTTPClientWithConfig(formats, nil)
+}
+
+// NewHTTPClientWithConfig creates a new rekor HTTP client,
+// using a customizable transport config.
+func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *Rekor {
+ // ensure nullable parameters have default
+ if cfg == nil {
+ cfg = DefaultTransportConfig()
+ }
+
+ // create transport and client
+ transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes)
+ return New(transport, formats)
+}
+
+// New creates a new rekor client
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *Rekor {
+ // ensure nullable parameters have default
+ if formats == nil {
+ formats = strfmt.Default
+ }
+
+ cli := new(Rekor)
+ cli.Transport = transport
+ cli.Entries = entries.New(transport, formats)
+ cli.Index = index.New(transport, formats)
+ cli.Pubkey = pubkey.New(transport, formats)
+ cli.Tlog = tlog.New(transport, formats)
+ return cli
+}
+
+// DefaultTransportConfig creates a TransportConfig with the
+// default settings taken from the meta section of the spec file.
+func DefaultTransportConfig() *TransportConfig {
+ return &TransportConfig{
+ Host: DefaultHost,
+ BasePath: DefaultBasePath,
+ Schemes: DefaultSchemes,
+ }
+}
+
+// TransportConfig contains the transport related info,
+// found in the meta section of the spec file.
+type TransportConfig struct {
+ Host string
+ BasePath string
+ Schemes []string
+}
+
+// WithHost overrides the default host,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithHost(host string) *TransportConfig {
+ cfg.Host = host
+ return cfg
+}
+
+// WithBasePath overrides the default basePath,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig {
+ cfg.BasePath = basePath
+ return cfg
+}
+
+// WithSchemes overrides the default schemes,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig {
+ cfg.Schemes = schemes
+ return cfg
+}
+
+// Rekor is a client for rekor
+type Rekor struct {
+ Entries entries.ClientService
+
+ Index index.ClientService
+
+ Pubkey pubkey.ClientService
+
+ Tlog tlog.ClientService
+
+ Transport runtime.ClientTransport
+}
+
+// SetTransport changes the transport on the client and all its subresources
+func (c *Rekor) SetTransport(transport runtime.ClientTransport) {
+ c.Transport = transport
+ c.Entries.SetTransport(transport)
+ c.Index.SetTransport(transport)
+ c.Pubkey.SetTransport(transport)
+ c.Tlog.SetTransport(transport)
+}
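Because `DefaultSchemes` is just `["http"]`, reaching a TLS-only deployment means overriding the transport config rather than using `Default` as-is. A short sketch, with a hypothetical private host:

```go
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/client"
)

func main() {
	cfg := client.DefaultTransportConfig().
		WithHost("rekor.example.internal"). // hypothetical private instance
		WithSchemes([]string{"https"})

	// A nil registry falls back to strfmt.Default inside New.
	rekorClient := client.NewHTTPClientWithConfig(nil, cfg)
	fmt.Printf("%T\n", rekorClient.Tlog)
}
```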
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go
new file mode 100644
index 00000000000..e0ae2cdd31e
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go
@@ -0,0 +1,144 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package tlog
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetLogInfoParams creates a new GetLogInfoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetLogInfoParams() *GetLogInfoParams {
+ return &GetLogInfoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetLogInfoParamsWithTimeout creates a new GetLogInfoParams object
+// with the ability to set a timeout on a request.
+func NewGetLogInfoParamsWithTimeout(timeout time.Duration) *GetLogInfoParams {
+ return &GetLogInfoParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetLogInfoParamsWithContext creates a new GetLogInfoParams object
+// with the ability to set a context for a request.
+func NewGetLogInfoParamsWithContext(ctx context.Context) *GetLogInfoParams {
+ return &GetLogInfoParams{
+ Context: ctx,
+ }
+}
+
+// NewGetLogInfoParamsWithHTTPClient creates a new GetLogInfoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetLogInfoParamsWithHTTPClient(client *http.Client) *GetLogInfoParams {
+ return &GetLogInfoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetLogInfoParams contains all the parameters to send to the API endpoint
+
+ for the get log info operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetLogInfoParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get log info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLogInfoParams) WithDefaults() *GetLogInfoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get log info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLogInfoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get log info params
+func (o *GetLogInfoParams) WithTimeout(timeout time.Duration) *GetLogInfoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get log info params
+func (o *GetLogInfoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get log info params
+func (o *GetLogInfoParams) WithContext(ctx context.Context) *GetLogInfoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get log info params
+func (o *GetLogInfoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get log info params
+func (o *GetLogInfoParams) WithHTTPClient(client *http.Client) *GetLogInfoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get log info params
+func (o *GetLogInfoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetLogInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
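With no operation-specific parameters, `GetLogInfoParams` exists purely for per-request plumbing: a context, a go-swagger timeout, and an optional `*http.Client`, each falling back to the client defaults when unset. A sketch of overriding all three (the helper name is hypothetical):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/sigstore/rekor/pkg/generated/client/tlog"
)

// newInfoParams is a hypothetical helper showing the per-request knobs.
func newInfoParams(ctx context.Context) *tlog.GetLogInfoParams {
	return tlog.NewGetLogInfoParamsWithContext(ctx).
		WithTimeout(15 * time.Second). // go-swagger request timeout
		WithHTTPClient(&http.Client{Timeout: 30 * time.Second})
}

func main() {
	params := newInfoParams(context.Background())
	fmt.Println(params.Context != nil)
}
```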
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go
new file mode 100644
index 00000000000..3d98f88cdfd
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go
@@ -0,0 +1,204 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package tlog
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ stderrors "errors"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// GetLogInfoReader is a Reader for the GetLogInfo structure.
+type GetLogInfoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *GetLogInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetLogInfoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetLogInfoDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetLogInfoOK creates a GetLogInfoOK with default headers values
+func NewGetLogInfoOK() *GetLogInfoOK {
+ return &GetLogInfoOK{}
+}
+
+/*
+GetLogInfoOK describes a response with status code 200, with default header values.
+
+A JSON object with the root hash and tree size as properties
+*/
+type GetLogInfoOK struct {
+ Payload *models.LogInfo
+}
+
+// IsSuccess returns true when this get log info OK response has a 2xx status code
+func (o *GetLogInfoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get log info OK response has a 3xx status code
+func (o *GetLogInfoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get log info OK response has a 4xx status code
+func (o *GetLogInfoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get log info OK response has a 5xx status code
+func (o *GetLogInfoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get log info OK response has a status code equal to that given
+func (o *GetLogInfoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get log info OK response
+func (o *GetLogInfoOK) Code() int {
+ return 200
+}
+
+func (o *GetLogInfoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload)
+}
+
+func (o *GetLogInfoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload)
+}
+
+func (o *GetLogInfoOK) GetPayload() *models.LogInfo {
+ return o.Payload
+}
+
+func (o *GetLogInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.LogInfo)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetLogInfoDefault creates a GetLogInfoDefault with default headers values
+func NewGetLogInfoDefault(code int) *GetLogInfoDefault {
+ return &GetLogInfoDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetLogInfoDefault describes a response with status code -1, with default header values.
+
+There was an internal error in the server while processing the request
+*/
+type GetLogInfoDefault struct {
+ _statusCode int
+
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this get log info default response has a 2xx status code
+func (o *GetLogInfoDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get log info default response has a 3xx status code
+func (o *GetLogInfoDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get log info default response has a 4xx status code
+func (o *GetLogInfoDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get log info default response has a 5xx status code
+func (o *GetLogInfoDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get log info default response has a status code equal to that given
+func (o *GetLogInfoDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get log info default response
+func (o *GetLogInfoDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetLogInfoDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetLogInfoDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetLogInfoDefault) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *GetLogInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go
new file mode 100644
index 00000000000..2b21ad887c1
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go
@@ -0,0 +1,255 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package tlog
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetLogProofParams creates a new GetLogProofParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetLogProofParams() *GetLogProofParams {
+ return &GetLogProofParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetLogProofParamsWithTimeout creates a new GetLogProofParams object
+// with the ability to set a timeout on a request.
+func NewGetLogProofParamsWithTimeout(timeout time.Duration) *GetLogProofParams {
+ return &GetLogProofParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetLogProofParamsWithContext creates a new GetLogProofParams object
+// with the ability to set a context for a request.
+func NewGetLogProofParamsWithContext(ctx context.Context) *GetLogProofParams {
+ return &GetLogProofParams{
+ Context: ctx,
+ }
+}
+
+// NewGetLogProofParamsWithHTTPClient creates a new GetLogProofParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetLogProofParamsWithHTTPClient(client *http.Client) *GetLogProofParams {
+ return &GetLogProofParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetLogProofParams contains all the parameters to send to the API endpoint
+
+ for the get log proof operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetLogProofParams struct {
+
+ /* FirstSize.
+
+ The size of the tree that you wish to prove consistency from (1 means the beginning of the log). Defaults to 1 if not specified.
+
+
+ Default: 1
+ */
+ FirstSize *int64
+
+ /* LastSize.
+
+ The size of the tree that you wish to prove consistency to
+ */
+ LastSize int64
+
+ /* TreeID.
+
+ The tree ID of the tree that you wish to prove consistency for
+ */
+ TreeID *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get log proof params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLogProofParams) WithDefaults() *GetLogProofParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get log proof params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLogProofParams) SetDefaults() {
+ var (
+ firstSizeDefault = int64(1)
+ )
+
+ val := GetLogProofParams{
+ FirstSize: &firstSizeDefault,
+ }
+
+ val.timeout = o.timeout
+ val.Context = o.Context
+ val.HTTPClient = o.HTTPClient
+ *o = val
+}
+
+// WithTimeout adds the timeout to the get log proof params
+func (o *GetLogProofParams) WithTimeout(timeout time.Duration) *GetLogProofParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get log proof params
+func (o *GetLogProofParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get log proof params
+func (o *GetLogProofParams) WithContext(ctx context.Context) *GetLogProofParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get log proof params
+func (o *GetLogProofParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get log proof params
+func (o *GetLogProofParams) WithHTTPClient(client *http.Client) *GetLogProofParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get log proof params
+func (o *GetLogProofParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithFirstSize adds the firstSize to the get log proof params
+func (o *GetLogProofParams) WithFirstSize(firstSize *int64) *GetLogProofParams {
+ o.SetFirstSize(firstSize)
+ return o
+}
+
+// SetFirstSize adds the firstSize to the get log proof params
+func (o *GetLogProofParams) SetFirstSize(firstSize *int64) {
+ o.FirstSize = firstSize
+}
+
+// WithLastSize adds the lastSize to the get log proof params
+func (o *GetLogProofParams) WithLastSize(lastSize int64) *GetLogProofParams {
+ o.SetLastSize(lastSize)
+ return o
+}
+
+// SetLastSize adds the lastSize to the get log proof params
+func (o *GetLogProofParams) SetLastSize(lastSize int64) {
+ o.LastSize = lastSize
+}
+
+// WithTreeID adds the treeID to the get log proof params
+func (o *GetLogProofParams) WithTreeID(treeID *string) *GetLogProofParams {
+ o.SetTreeID(treeID)
+ return o
+}
+
+// SetTreeID adds the treeId to the get log proof params
+func (o *GetLogProofParams) SetTreeID(treeID *string) {
+ o.TreeID = treeID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetLogProofParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.FirstSize != nil {
+
+ // query param firstSize
+ var qrFirstSize int64
+
+ if o.FirstSize != nil {
+ qrFirstSize = *o.FirstSize
+ }
+ qFirstSize := swag.FormatInt64(qrFirstSize)
+ if qFirstSize != "" {
+
+ if err := r.SetQueryParam("firstSize", qFirstSize); err != nil {
+ return err
+ }
+ }
+ }
+
+ // query param lastSize
+ qrLastSize := o.LastSize
+ qLastSize := swag.FormatInt64(qrLastSize)
+ if qLastSize != "" {
+
+ if err := r.SetQueryParam("lastSize", qLastSize); err != nil {
+ return err
+ }
+ }
+
+ if o.TreeID != nil {
+
+ // query param treeID
+ var qrTreeID string
+
+ if o.TreeID != nil {
+ qrTreeID = *o.TreeID
+ }
+ qTreeID := qrTreeID
+ if qTreeID != "" {
+
+ if err := r.SetQueryParam("treeID", qTreeID); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
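One detail worth noting in `SetDefaults` above: it builds a fresh params value with `FirstSize` hydrated to 1, then copies `timeout`, `Context`, and `HTTPClient` across before overwriting the receiver, so calling it never clobbers transport settings. A small sketch of the usual construction order, with a hypothetical tree size:

```go
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/client/tlog"
)

func main() {
	// WithDefaults hydrates FirstSize to the spec default of 1;
	// LastSize (the size to prove consistency to) is required.
	params := tlog.NewGetLogProofParams().
		WithDefaults().
		WithLastSize(42) // hypothetical current tree size

	fmt.Println(*params.FirstSize, params.LastSize) // 1 42
}
```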
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go
new file mode 100644
index 00000000000..ae8e50d2879
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go
@@ -0,0 +1,280 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package tlog
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ stderrors "errors"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// GetLogProofReader is a Reader for the GetLogProof structure.
+type GetLogProofReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *GetLogProofReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (any, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetLogProofOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetLogProofBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ result := NewGetLogProofDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetLogProofOK creates a GetLogProofOK with default headers values
+func NewGetLogProofOK() *GetLogProofOK {
+ return &GetLogProofOK{}
+}
+
+/*
+GetLogProofOK describes a response with status code 200, with default header values.
+
+All hashes required to compute the consistency proof
+*/
+type GetLogProofOK struct {
+ Payload *models.ConsistencyProof
+}
+
+// IsSuccess returns true when this get log proof OK response has a 2xx status code
+func (o *GetLogProofOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get log proof OK response has a 3xx status code
+func (o *GetLogProofOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get log proof OK response has a 4xx status code
+func (o *GetLogProofOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get log proof OK response has a 5xx status code
+func (o *GetLogProofOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get log proof OK response has a status code equal to that given
+func (o *GetLogProofOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get log proof OK response
+func (o *GetLogProofOK) Code() int {
+ return 200
+}
+
+func (o *GetLogProofOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload)
+}
+
+func (o *GetLogProofOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload)
+}
+
+func (o *GetLogProofOK) GetPayload() *models.ConsistencyProof {
+ return o.Payload
+}
+
+func (o *GetLogProofOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.ConsistencyProof)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetLogProofBadRequest creates a GetLogProofBadRequest with default headers values
+func NewGetLogProofBadRequest() *GetLogProofBadRequest {
+ return &GetLogProofBadRequest{}
+}
+
+/*
+GetLogProofBadRequest describes a response with status code 400, with default header values.
+
+The content supplied to the server was invalid
+*/
+type GetLogProofBadRequest struct {
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this get log proof bad request response has a 2xx status code
+func (o *GetLogProofBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get log proof bad request response has a 3xx status code
+func (o *GetLogProofBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get log proof bad request response has a 4xx status code
+func (o *GetLogProofBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get log proof bad request response has a 5xx status code
+func (o *GetLogProofBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get log proof bad request response has a status code equal to that given
+func (o *GetLogProofBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the get log proof bad request response
+func (o *GetLogProofBadRequest) Code() int {
+ return 400
+}
+
+func (o *GetLogProofBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload)
+}
+
+func (o *GetLogProofBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload)
+}
+
+func (o *GetLogProofBadRequest) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *GetLogProofBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetLogProofDefault creates a GetLogProofDefault with default headers values
+func NewGetLogProofDefault(code int) *GetLogProofDefault {
+ return &GetLogProofDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetLogProofDefault describes a response with status code -1, with default header values.
+
+There was an internal error in the server while processing the request
+*/
+type GetLogProofDefault struct {
+ _statusCode int
+
+ Payload *models.Error
+}
+
+// IsSuccess returns true when this get log proof default response has a 2xx status code
+func (o *GetLogProofDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get log proof default response has a 3xx status code
+func (o *GetLogProofDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get log proof default response has a 4xx status code
+func (o *GetLogProofDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get log proof default response has a 5xx status code
+func (o *GetLogProofDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get log proof default response has a status code equal to that given
+func (o *GetLogProofDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get log proof default response
+func (o *GetLogProofDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetLogProofDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload)
+}
+
+func (o *GetLogProofDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload)
+}
+
+func (o *GetLogProofDefault) GetPayload() *models.Error {
+ return o.Payload
+}
+
+func (o *GetLogProofDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Error)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go
new file mode 100644
index 00000000000..ff174ebfa23
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go
@@ -0,0 +1,171 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package tlog
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new tlog API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new tlog API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (e.g. rekor.sigstore.dev).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new tlog API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (e.g. rekor.sigstore.dev).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for tlog API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ GetLogInfo(params *GetLogInfoParams, opts ...ClientOption) (*GetLogInfoOK, error)
+
+ GetLogProof(params *GetLogProofParams, opts ...ClientOption) (*GetLogProofOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+GetLogInfo gets information about the current state of the transparency log
+
+Returns the current root hash and size of the Merkle tree used to store the log entries.
+*/
+func (a *Client) GetLogInfo(params *GetLogInfoParams, opts ...ClientOption) (*GetLogInfoOK, error) {
+ // NOTE: parameters are not validated before sending
+ if params == nil {
+ params = NewGetLogInfoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "getLogInfo",
+ Method: "GET",
+ PathPattern: "/api/v1/log",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetLogInfoReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+
+ // only one success response has to be checked
+ success, ok := result.(*GetLogInfoOK)
+ if ok {
+ return success, nil
+ }
+
+ // unexpected success response.
+ //
+ // a default response is provided: fill this and return an error
+ unexpectedSuccess := result.(*GetLogInfoDefault)
+
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetLogProof gets information required to generate a consistency proof for the transparency log
+
+Returns a list of hashes for specified tree sizes that can be used to confirm the consistency of the transparency log.
+*/
+func (a *Client) GetLogProof(params *GetLogProofParams, opts ...ClientOption) (*GetLogProofOK, error) {
+ // NOTE: parameters are not validated before sending
+ if params == nil {
+ params = NewGetLogProofParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "getLogProof",
+ Method: "GET",
+ PathPattern: "/api/v1/log/proof",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetLogProofReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+
+ // only one success response has to be checked
+ success, ok := result.(*GetLogProofOK)
+ if ok {
+ return success, nil
+ }
+
+ // unexpected success response.
+ //
+ // a default response is provided: fill this and return an error
+ unexpectedSuccess := result.(*GetLogProofDefault)
+
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go
new file mode 100644
index 00000000000..5607679fdf7
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Alpine Alpine package
+//
+// swagger:model alpine
+type Alpine struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec AlpineSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Alpine) Kind() string {
+ return "alpine"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Alpine) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Alpine) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec AlpineSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Alpine
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Alpine) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec AlpineSchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this alpine
+func (m *Alpine) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Alpine) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Alpine) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this alpine based on the context in which it is used
+func (m *Alpine) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Alpine) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Alpine) UnmarshalBinary(b []byte) error {
+ var res Alpine
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go
new file mode 100644
index 00000000000..00f76926c39
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// AlpineSchema Alpine Package Schema
+//
+// # Schema for Alpine package objects
+//
+// swagger:model alpineSchema
+type AlpineSchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go
new file mode 100644
index 00000000000..c77008ce7a8
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go
@@ -0,0 +1,480 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// AlpineV001Schema Alpine v0.0.1 Schema
+//
+// # Schema for Alpine Package entries
+//
+// swagger:model alpineV001Schema
+type AlpineV001Schema struct {
+
+ // package
+ // Required: true
+ Package *AlpineV001SchemaPackage `json:"package"`
+
+ // public key
+ // Required: true
+ PublicKey *AlpineV001SchemaPublicKey `json:"publicKey"`
+}
+
+// Validate validates this alpine v001 schema
+func (m *AlpineV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validatePackage(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *AlpineV001Schema) validatePackage(formats strfmt.Registry) error {
+
+ if err := validate.Required("package", "body", m.Package); err != nil {
+ return err
+ }
+
+ if m.Package != nil {
+ if err := m.Package.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("package")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("package")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *AlpineV001Schema) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ if m.PublicKey != nil {
+ if err := m.PublicKey.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this alpine v001 schema based on the context in which it is used
+func (m *AlpineV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidatePackage(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Package != nil {
+
+ if err := m.Package.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("package")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("package")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *AlpineV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PublicKey != nil {
+
+ if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *AlpineV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *AlpineV001Schema) UnmarshalBinary(b []byte) error {
+ var res AlpineV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// AlpineV001SchemaPackage Information about the package associated with the entry
+//
+// swagger:model AlpineV001SchemaPackage
+type AlpineV001SchemaPackage struct {
+
+ // Specifies the package inline within the document
+ // Format: byte
+ Content strfmt.Base64 `json:"content,omitempty"`
+
+ // hash
+ Hash *AlpineV001SchemaPackageHash `json:"hash,omitempty"`
+
+ // Values of the .PKGINFO key / value pairs
+ // Read Only: true
+ Pkginfo map[string]string `json:"pkginfo,omitempty"`
+}
+
+// Validate validates this alpine v001 schema package
+func (m *AlpineV001SchemaPackage) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *AlpineV001SchemaPackage) validateHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if m.Hash != nil {
+ if err := m.Hash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("package" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("package" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this alpine v001 schema package based on the context in which it is used
+func (m *AlpineV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePkginfo(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *AlpineV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Hash != nil {
+
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if err := m.Hash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("package" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("package" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *AlpineV001SchemaPackage) contextValidatePkginfo(ctx context.Context, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *AlpineV001SchemaPackage) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *AlpineV001SchemaPackage) UnmarshalBinary(b []byte) error {
+ var res AlpineV001SchemaPackage
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// AlpineV001SchemaPackageHash Specifies the hash algorithm and value for the package
+//
+// swagger:model AlpineV001SchemaPackageHash
+type AlpineV001SchemaPackageHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the package
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this alpine v001 schema package hash
+func (m *AlpineV001SchemaPackageHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var alpineV001SchemaPackageHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ alpineV001SchemaPackageHashTypeAlgorithmPropEnum = append(alpineV001SchemaPackageHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // AlpineV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256"
+ AlpineV001SchemaPackageHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *AlpineV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, alpineV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *AlpineV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *AlpineV001SchemaPackageHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this alpine v001 schema package hash based on the context in which it is used
+func (m *AlpineV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *AlpineV001SchemaPackageHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *AlpineV001SchemaPackageHash) UnmarshalBinary(b []byte) error {
+ var res AlpineV001SchemaPackageHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// AlpineV001SchemaPublicKey The public key that can verify the package signature
+//
+// swagger:model AlpineV001SchemaPublicKey
+type AlpineV001SchemaPublicKey struct {
+
+ // Specifies the content of the public key inline within the document
+ // Required: true
+ // Format: byte
+ Content *strfmt.Base64 `json:"content"`
+}
+
+// Validate validates this alpine v001 schema public key
+func (m *AlpineV001SchemaPublicKey) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *AlpineV001SchemaPublicKey) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this alpine v001 schema public key based on the context in which it is used
+func (m *AlpineV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *AlpineV001SchemaPublicKey) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *AlpineV001SchemaPublicKey) UnmarshalBinary(b []byte) error {
+ var res AlpineV001SchemaPublicKey
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go
new file mode 100644
index 00000000000..804ddd11a96
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go
@@ -0,0 +1,118 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// ConsistencyProof consistency proof
+//
+// swagger:model ConsistencyProof
+type ConsistencyProof struct {
+
+ // hashes
+ // Required: true
+ Hashes []string `json:"hashes"`
+
+ // The hash value stored at the root of the merkle tree at the time the proof was generated
+ // Required: true
+ // Pattern: ^[0-9a-fA-F]{64}$
+ RootHash *string `json:"rootHash"`
+}
+
+// Validate validates this consistency proof
+func (m *ConsistencyProof) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateHashes(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateRootHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *ConsistencyProof) validateHashes(formats strfmt.Registry) error {
+
+ if err := validate.Required("hashes", "body", m.Hashes); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Hashes); i++ {
+
+ if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func (m *ConsistencyProof) validateRootHash(formats strfmt.Registry) error {
+
+ if err := validate.Required("rootHash", "body", m.RootHash); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this consistency proof based on the context in which it is used
+func (m *ConsistencyProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *ConsistencyProof) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *ConsistencyProof) UnmarshalBinary(b []byte) error {
+ var res ConsistencyProof
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go
new file mode 100644
index 00000000000..8de4083baf8
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Cose COSE object
+//
+// swagger:model cose
+type Cose struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec CoseSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Cose) Kind() string {
+ return "cose"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Cose) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Cose) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec CoseSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Cose
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Cose) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec CoseSchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this cose
+func (m *Cose) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Cose) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Cose) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this cose based on the context in which it is used
+func (m *Cose) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Cose) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Cose) UnmarshalBinary(b []byte) error {
+ var res Cose
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go
new file mode 100644
index 00000000000..8f901605038
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// CoseSchema COSE Schema
+//
+// # COSE for Rekord objects
+//
+// swagger:model coseSchema
+type CoseSchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go
new file mode 100644
index 00000000000..9dafe29ce37
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go
@@ -0,0 +1,546 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// CoseV001Schema cose v0.0.1 Schema
+//
+// # Schema for cose object
+//
+// swagger:model coseV001Schema
+type CoseV001Schema struct {
+
+ // data
+ Data *CoseV001SchemaData `json:"data,omitempty"`
+
+ // The COSE Sign1 Message
+ // Format: byte
+ Message strfmt.Base64 `json:"message,omitempty"`
+
+ // The public key that can verify the signature
+ // Required: true
+ // Format: byte
+ PublicKey *strfmt.Base64 `json:"publicKey"`
+}
+
+// Validate validates this cose v001 schema
+func (m *CoseV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateData(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *CoseV001Schema) validateData(formats strfmt.Registry) error {
+ if swag.IsZero(m.Data) { // not required
+ return nil
+ }
+
+ if m.Data != nil {
+ if err := m.Data.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *CoseV001Schema) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this cose v001 schema based on the context in which it is used
+func (m *CoseV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateData(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Data != nil {
+
+ if swag.IsZero(m.Data) { // not required
+ return nil
+ }
+
+ if err := m.Data.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *CoseV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *CoseV001Schema) UnmarshalBinary(b []byte) error {
+ var res CoseV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// CoseV001SchemaData Information about the content associated with the entry
+//
+// swagger:model CoseV001SchemaData
+type CoseV001SchemaData struct {
+
+ // Specifies the additional authenticated data required to verify the signature
+ // Format: byte
+ Aad strfmt.Base64 `json:"aad,omitempty"`
+
+ // envelope hash
+ EnvelopeHash *CoseV001SchemaDataEnvelopeHash `json:"envelopeHash,omitempty"`
+
+ // payload hash
+ PayloadHash *CoseV001SchemaDataPayloadHash `json:"payloadHash,omitempty"`
+}
+
+// Validate validates this cose v001 schema data
+func (m *CoseV001SchemaData) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEnvelopeHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePayloadHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *CoseV001SchemaData) validateEnvelopeHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.EnvelopeHash) { // not required
+ return nil
+ }
+
+ if m.EnvelopeHash != nil {
+ if err := m.EnvelopeHash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data" + "." + "envelopeHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data" + "." + "envelopeHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *CoseV001SchemaData) validatePayloadHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if m.PayloadHash != nil {
+ if err := m.PayloadHash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data" + "." + "payloadHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data" + "." + "payloadHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this cose v001 schema data based on the context in which it is used
+func (m *CoseV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.EnvelopeHash != nil {
+
+ if swag.IsZero(m.EnvelopeHash) { // not required
+ return nil
+ }
+
+ if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data" + "." + "envelopeHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data" + "." + "envelopeHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *CoseV001SchemaData) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PayloadHash != nil {
+
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data" + "." + "payloadHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data" + "." + "payloadHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *CoseV001SchemaData) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *CoseV001SchemaData) UnmarshalBinary(b []byte) error {
+ var res CoseV001SchemaData
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// CoseV001SchemaDataEnvelopeHash Specifies the hash algorithm and value for the COSE envelope
+//
+// swagger:model CoseV001SchemaDataEnvelopeHash
+type CoseV001SchemaDataEnvelopeHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the envelope
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this cose v001 schema data envelope hash
+func (m *CoseV001SchemaDataEnvelopeHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum = append(coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // CoseV001SchemaDataEnvelopeHashAlgorithmSha256 captures enum value "sha256"
+ CoseV001SchemaDataEnvelopeHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("data"+"."+"envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("data"+"."+"envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *CoseV001SchemaDataEnvelopeHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("data"+"."+"envelopeHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this cose v001 schema data envelope hash based on the context in which it is used
+func (m *CoseV001SchemaDataEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *CoseV001SchemaDataEnvelopeHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *CoseV001SchemaDataEnvelopeHash) UnmarshalBinary(b []byte) error {
+ var res CoseV001SchemaDataEnvelopeHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// CoseV001SchemaDataPayloadHash Specifies the hash algorithm and value for the content
+//
+// swagger:model CoseV001SchemaDataPayloadHash
+type CoseV001SchemaDataPayloadHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the content
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this cose v001 schema data payload hash
+func (m *CoseV001SchemaDataPayloadHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum = append(coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // CoseV001SchemaDataPayloadHashAlgorithmSha256 captures enum value "sha256"
+ CoseV001SchemaDataPayloadHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *CoseV001SchemaDataPayloadHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *CoseV001SchemaDataPayloadHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("data"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("data"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *CoseV001SchemaDataPayloadHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("data"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this cose v001 schema data payload hash based on the context in which it is used
+func (m *CoseV001SchemaDataPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *CoseV001SchemaDataPayloadHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *CoseV001SchemaDataPayloadHash) UnmarshalBinary(b []byte) error {
+ var res CoseV001SchemaDataPayloadHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go
new file mode 100644
index 00000000000..dde562054c3
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// DSSE DSSE envelope
+//
+// swagger:model dsse
+type DSSE struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec DSSESchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *DSSE) Kind() string {
+ return "dsse"
+}
+
+// SetKind sets the kind of this subtype
+func (m *DSSE) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *DSSE) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec DSSESchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result DSSE
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m DSSE) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec DSSESchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this dsse
+func (m *DSSE) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSE) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSE) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this dsse based on the context in which it is used
+func (m *DSSE) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSE) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSE) UnmarshalBinary(b []byte) error {
+ var res DSSE
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go
new file mode 100644
index 00000000000..0dc5c87ed37
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// DSSESchema DSSE Schema
+//
+// log entry schema for dsse envelopes
+//
+// swagger:model dsseSchema
+type DSSESchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go
new file mode 100644
index 00000000000..8cad568daf8
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go
@@ -0,0 +1,718 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// DSSEV001Schema DSSE v0.0.1 Schema
+//
+// # Schema for DSSE envelopes
+//
+// swagger:model dsseV001Schema
+type DSSEV001Schema struct {
+
+ // envelope hash
+ EnvelopeHash *DSSEV001SchemaEnvelopeHash `json:"envelopeHash,omitempty"`
+
+ // payload hash
+ PayloadHash *DSSEV001SchemaPayloadHash `json:"payloadHash,omitempty"`
+
+ // proposed content
+ ProposedContent *DSSEV001SchemaProposedContent `json:"proposedContent,omitempty"`
+
+ // extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings
+ // Read Only: true
+ // Min Items: 1
+ Signatures []*DSSEV001SchemaSignaturesItems0 `json:"signatures"`
+}
+
+// Validate validates this dsse v001 schema
+func (m *DSSEV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEnvelopeHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePayloadHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateProposedContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSignatures(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001Schema) validateEnvelopeHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.EnvelopeHash) { // not required
+ return nil
+ }
+
+ if m.EnvelopeHash != nil {
+ if err := m.EnvelopeHash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("envelopeHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("envelopeHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) validatePayloadHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if m.PayloadHash != nil {
+ if err := m.PayloadHash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("payloadHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("payloadHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) validateProposedContent(formats strfmt.Registry) error {
+ if swag.IsZero(m.ProposedContent) { // not required
+ return nil
+ }
+
+ if m.ProposedContent != nil {
+ if err := m.ProposedContent.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("proposedContent")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("proposedContent")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) validateSignatures(formats strfmt.Registry) error {
+ if swag.IsZero(m.Signatures) { // not required
+ return nil
+ }
+
+ iSignaturesSize := int64(len(m.Signatures))
+
+ if err := validate.MinItems("signatures", "body", iSignaturesSize, 1); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Signatures); i++ {
+ if swag.IsZero(m.Signatures[i]) { // not required
+ continue
+ }
+
+ if m.Signatures[i] != nil {
+ if err := m.Signatures[i].Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signatures" + "." + strconv.Itoa(i))
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signatures" + "." + strconv.Itoa(i))
+ }
+
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validates this dsse v001 schema based on the context in which it is used
+func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateProposedContent(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSignatures(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.EnvelopeHash != nil {
+
+ if swag.IsZero(m.EnvelopeHash) { // not required
+ return nil
+ }
+
+ if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("envelopeHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("envelopeHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PayloadHash != nil {
+
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("payloadHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("payloadHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.ProposedContent != nil {
+
+ if swag.IsZero(m.ProposedContent) { // not required
+ return nil
+ }
+
+ if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("proposedContent")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("proposedContent")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error {
+
+ if err := validate.ReadOnly(ctx, "signatures", "body", m.Signatures); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Signatures); i++ {
+
+ if m.Signatures[i] != nil {
+
+ if swag.IsZero(m.Signatures[i]) { // not required
+ return nil
+ }
+
+ if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signatures" + "." + strconv.Itoa(i))
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signatures" + "." + strconv.Itoa(i))
+ }
+
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001Schema) UnmarshalBinary(b []byte) error {
+ var res DSSEV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaEnvelopeHash Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor
+//
+// swagger:model DSSEV001SchemaEnvelopeHash
+type DSSEV001SchemaEnvelopeHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The value of the computed digest over the entire envelope
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this DSSE v001 schema envelope hash
+func (m *DSSEV001SchemaEnvelopeHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum = append(dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // DSSEV001SchemaEnvelopeHashAlgorithmSha256 captures enum value "sha256"
+ DSSEV001SchemaEnvelopeHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaEnvelopeHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("envelopeHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this DSSE v001 schema envelope hash based on the context in which it is used
+func (m *DSSEV001SchemaEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaEnvelopeHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaEnvelopeHash) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaEnvelopeHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope
+//
+// swagger:model DSSEV001SchemaPayloadHash
+type DSSEV001SchemaPayloadHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The value of the computed digest over the payload within the envelope
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this DSSE v001 schema payload hash
+func (m *DSSEV001SchemaPayloadHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var dsseV001SchemaPayloadHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ dsseV001SchemaPayloadHashTypeAlgorithmPropEnum = append(dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // DSSEV001SchemaPayloadHashAlgorithmSha256 captures enum value "sha256"
+ DSSEV001SchemaPayloadHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *DSSEV001SchemaPayloadHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaPayloadHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaPayloadHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("payloadHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this DSSE v001 schema payload hash based on the context in which it is used
+func (m *DSSEV001SchemaPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaPayloadHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaPayloadHash) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaPayloadHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaProposedContent DSSE v001 schema proposed content
+//
+// swagger:model DSSEV001SchemaProposedContent
+type DSSEV001SchemaProposedContent struct {
+
+ // DSSE envelope specified as a stringified JSON object
+ // Required: true
+ Envelope *string `json:"envelope"`
+
+ // collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings
+ // Required: true
+ // Min Items: 1
+ Verifiers []strfmt.Base64 `json:"verifiers"`
+}
+
+// Validate validates this DSSE v001 schema proposed content
+func (m *DSSEV001SchemaProposedContent) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEnvelope(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateVerifiers(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaProposedContent) validateEnvelope(formats strfmt.Registry) error {
+
+ if err := validate.Required("proposedContent"+"."+"envelope", "body", m.Envelope); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaProposedContent) validateVerifiers(formats strfmt.Registry) error {
+
+ if err := validate.Required("proposedContent"+"."+"verifiers", "body", m.Verifiers); err != nil {
+ return err
+ }
+
+ iVerifiersSize := int64(len(m.Verifiers))
+
+ if err := validate.MinItems("proposedContent"+"."+"verifiers", "body", iVerifiersSize, 1); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this DSSE v001 schema proposed content based on the context in which it is used
+func (m *DSSEV001SchemaProposedContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaProposedContent) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaProposedContent) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaProposedContent
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaSignaturesItems0 a signature of the envelope's payload along with the verification material for the signature
+//
+// swagger:model DSSEV001SchemaSignaturesItems0
+type DSSEV001SchemaSignaturesItems0 struct {
+
+ // base64 encoded signature of the payload
+ // Required: true
+ // Pattern: ^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$
+ Signature *string `json:"signature"`
+
+ // verification material that was used to verify the corresponding signature, specified as a base64 encoded string
+ // Required: true
+ // Format: byte
+ Verifier *strfmt.Base64 `json:"verifier"`
+}
+
+// Validate validates this DSSE v001 schema signatures items0
+func (m *DSSEV001SchemaSignaturesItems0) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateSignature(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateVerifier(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaSignaturesItems0) validateSignature(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature", "body", m.Signature); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("signature", "body", *m.Signature, `^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaSignaturesItems0) validateVerifier(formats strfmt.Registry) error {
+
+ if err := validate.Required("verifier", "body", m.Verifier); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this DSSE v001 schema signatures items0 based on the context in which it is used
+func (m *DSSEV001SchemaSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaSignaturesItems0) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaSignaturesItems0) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaSignaturesItems0
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
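The generated models above are plain data holders plus schema validation; a minimal usage sketch (not part of the vendored file; the envelope string and verifier bytes are hypothetical placeholders) shows how a proposed DSSE entry might be assembled and checked against the Required/MinItems constraints:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// Hypothetical DSSE envelope, supplied as a stringified JSON object.
	envelope := `{"payloadType":"application/vnd.in-toto+json","payload":"...","signatures":[]}`

	entry := &models.DSSEV001Schema{
		ProposedContent: &models.DSSEV001SchemaProposedContent{
			Envelope: &envelope,
			// Verifiers is Required with MinItems: 1; these bytes are placeholders.
			Verifiers: []strfmt.Base64{strfmt.Base64("placeholder-public-key")},
		},
	}

	// Validate walks the nested models; envelopeHash, payloadHash and the
	// read-only signatures field are optional and skipped while zero.
	if err := entry.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	fmt.Println("proposed DSSE content is structurally valid")
}
```

Leaving Verifiers unset would instead surface a CompositeValidationError naming the `proposedContent.verifiers` path, which is how the nested `ValidateName` calls above manifest to callers.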
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go
new file mode 100644
index 00000000000..ac14f2026e4
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go
@@ -0,0 +1,69 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// Error error
+//
+// swagger:model Error
+type Error struct {
+
+ // code
+ Code int64 `json:"code,omitempty"`
+
+ // message
+ Message string `json:"message,omitempty"`
+}
+
+// Validate validates this error
+func (m *Error) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this error based on the context in which it is used
+func (m *Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Error) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Error) UnmarshalBinary(b []byte) error {
+ var res Error
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
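The Error model's MarshalBinary/UnmarshalBinary methods are thin JSON wrappers over swag; a short, purely illustrative round-trip sketch (sample values, standalone program):

```go
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	in := &models.Error{Code: 404, Message: "entry not found"} // sample values

	b, err := in.MarshalBinary() // swag.WriteJSON under the hood
	if err != nil {
		panic(err)
	}

	var out models.Error
	if err := out.UnmarshalBinary(b); err != nil { // swag.ReadJSON under the hood
		panic(err)
	}
	fmt.Printf("%d: %s\n", out.Code, out.Message) // prints: 404: entry not found
}
```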
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go
new file mode 100644
index 00000000000..b3e1f8a3bda
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Hashedrekord Hashed Rekord object
+//
+// swagger:model hashedrekord
+type Hashedrekord struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec HashedrekordSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Hashedrekord) Kind() string {
+ return "hashedrekord"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Hashedrekord) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Hashedrekord) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec HashedrekordSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types. */
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Hashedrekord
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Hashedrekord) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec HashedrekordSchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this hashedrekord
+func (m *Hashedrekord) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Hashedrekord) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Hashedrekord) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this hashedrekord based on the context in which it is used
+func (m *Hashedrekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Hashedrekord) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Hashedrekord) UnmarshalBinary(b []byte) error {
+ var res Hashedrekord
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
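Hashedrekord is one of Rekor's polymorphic entry kinds: MarshalJSON merges in the fixed `kind` discriminator, and UnmarshalJSON rejects payloads whose `kind` does not match. A small sketch of the round trip (illustrative only, with an empty spec):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	v := "0.0.1" // must satisfy the semver pattern on apiVersion
	rec := models.Hashedrekord{
		APIVersion: &v,
		Spec:       map[string]any{}, // HashedrekordSchema is an untyped `any`
	}

	// MarshalJSON concatenates the base fields with {"kind":"hashedrekord"},
	// yielding roughly {"apiVersion":"0.0.1","spec":{},"kind":"hashedrekord"}.
	raw, err := json.Marshal(rec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))

	// A mismatched discriminator is rejected with a 422 validation error.
	var out models.Hashedrekord
	err = out.UnmarshalJSON([]byte(`{"kind":"rekord","apiVersion":"0.0.1","spec":{}}`))
	fmt.Println(err) // invalid kind value: "rekord"
}
```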
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go
new file mode 100644
index 00000000000..67fc8ababad
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// HashedrekordSchema Hashedrekord Schema
+//
+// # Schema for Hashedrekord objects
+//
+// swagger:model hashedrekordSchema
+type HashedrekordSchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go
new file mode 100644
index 00000000000..866842e5690
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go
@@ -0,0 +1,552 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// HashedrekordV001Schema Hashed Rekor v0.0.1 Schema
+//
+// # Schema for Hashed Rekord object
+//
+// swagger:model hashedrekordV001Schema
+type HashedrekordV001Schema struct {
+
+ // data
+ // Required: true
+ Data *HashedrekordV001SchemaData `json:"data"`
+
+ // signature
+ // Required: true
+ Signature *HashedrekordV001SchemaSignature `json:"signature"`
+}
+
+// Validate validates this hashedrekord v001 schema
+func (m *HashedrekordV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateData(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSignature(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HashedrekordV001Schema) validateData(formats strfmt.Registry) error {
+
+ if err := validate.Required("data", "body", m.Data); err != nil {
+ return err
+ }
+
+ if m.Data != nil {
+ if err := m.Data.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HashedrekordV001Schema) validateSignature(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature", "body", m.Signature); err != nil {
+ return err
+ }
+
+ if m.Signature != nil {
+ if err := m.Signature.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this hashedrekord v001 schema based on the context in which it is used
+func (m *HashedrekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateData(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSignature(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Data != nil {
+
+ if err := m.Data.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HashedrekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Signature != nil {
+
+ if err := m.Signature.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HashedrekordV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HashedrekordV001Schema) UnmarshalBinary(b []byte) error {
+ var res HashedrekordV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// HashedrekordV001SchemaData Information about the content associated with the entry
+//
+// swagger:model HashedrekordV001SchemaData
+type HashedrekordV001SchemaData struct {
+
+ // hash
+ Hash *HashedrekordV001SchemaDataHash `json:"hash,omitempty"`
+}
+
+// Validate validates this hashedrekord v001 schema data
+func (m *HashedrekordV001SchemaData) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HashedrekordV001SchemaData) validateHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if m.Hash != nil {
+ if err := m.Hash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this hashedrekord v001 schema data based on the context in which it is used
+func (m *HashedrekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HashedrekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Hash != nil {
+
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if err := m.Hash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HashedrekordV001SchemaData) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HashedrekordV001SchemaData) UnmarshalBinary(b []byte) error {
+ var res HashedrekordV001SchemaData
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// HashedrekordV001SchemaDataHash Specifies the hash algorithm and value for the content
+//
+// swagger:model HashedrekordV001SchemaDataHash
+type HashedrekordV001SchemaDataHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256","sha384","sha512"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the content, as represented by a lower case hexadecimal string
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this hashedrekord v001 schema data hash
+func (m *HashedrekordV001SchemaDataHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256","sha384","sha512"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum = append(hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // HashedrekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256"
+ HashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256"
+
+ // HashedrekordV001SchemaDataHashAlgorithmSha384 captures enum value "sha384"
+ HashedrekordV001SchemaDataHashAlgorithmSha384 string = "sha384"
+
+ // HashedrekordV001SchemaDataHashAlgorithmSha512 captures enum value "sha512"
+ HashedrekordV001SchemaDataHashAlgorithmSha512 string = "sha512"
+)
+
+// prop value enum
+func (m *HashedrekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *HashedrekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *HashedrekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this hashedrekord v001 schema data hash based on the context in which it is used
+func (m *HashedrekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HashedrekordV001SchemaDataHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HashedrekordV001SchemaDataHash) UnmarshalBinary(b []byte) error {
+ var res HashedrekordV001SchemaDataHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// HashedrekordV001SchemaSignature Information about the detached signature associated with the entry
+//
+// swagger:model HashedrekordV001SchemaSignature
+type HashedrekordV001SchemaSignature struct {
+
+ // Specifies the content of the signature inline within the document
+ // Format: byte
+ Content strfmt.Base64 `json:"content,omitempty"`
+
+ // public key
+ PublicKey *HashedrekordV001SchemaSignaturePublicKey `json:"publicKey,omitempty"`
+}
+
+// Validate validates this hashedrekord v001 schema signature
+func (m *HashedrekordV001SchemaSignature) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HashedrekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error {
+ if swag.IsZero(m.PublicKey) { // not required
+ return nil
+ }
+
+ if m.PublicKey != nil {
+ if err := m.PublicKey.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature" + "." + "publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature" + "." + "publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this hashedrekord v001 schema signature based on the context in which it is used
+func (m *HashedrekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HashedrekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PublicKey != nil {
+
+ if swag.IsZero(m.PublicKey) { // not required
+ return nil
+ }
+
+ if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature" + "." + "publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature" + "." + "publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HashedrekordV001SchemaSignature) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HashedrekordV001SchemaSignature) UnmarshalBinary(b []byte) error {
+ var res HashedrekordV001SchemaSignature
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// HashedrekordV001SchemaSignaturePublicKey The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information
+//
+// swagger:model HashedrekordV001SchemaSignaturePublicKey
+type HashedrekordV001SchemaSignaturePublicKey struct {
+
+ // Specifies the content of the public key or code signing certificate inline within the document
+ // Format: byte
+ Content strfmt.Base64 `json:"content,omitempty"`
+}
+
+// Validate validates this hashedrekord v001 schema signature public key
+func (m *HashedrekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this hashedrekord v001 schema signature public key based on the context in which it is used
+func (m *HashedrekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HashedrekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HashedrekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error {
+ var res HashedrekordV001SchemaSignaturePublicKey
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
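Putting the pieces together: a hashedrekord v0.0.1 entry carries only a digest of the artifact plus a detached signature and verification key. A hedged sketch of building and validating one (the signature and key bytes below are placeholders, not cryptographically valid material):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	digest := sha256.Sum256([]byte("artifact bytes")) // hypothetical artifact content
	alg := models.HashedrekordV001SchemaDataHashAlgorithmSha256
	val := hex.EncodeToString(digest[:]) // lower case hex, as the schema requires

	entry := models.HashedrekordV001Schema{
		Data: &models.HashedrekordV001SchemaData{
			Hash: &models.HashedrekordV001SchemaDataHash{Algorithm: &alg, Value: &val},
		},
		Signature: &models.HashedrekordV001SchemaSignature{
			Content: strfmt.Base64("placeholder-signature"), // not a real signature
			PublicKey: &models.HashedrekordV001SchemaSignaturePublicKey{
				Content: strfmt.Base64("placeholder-public-key"), // not a real key
			},
		},
	}

	if err := entry.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("hashedrekord v0.0.1 entry is structurally valid")
}
```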
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go
new file mode 100644
index 00000000000..d19b8bc8c9d
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Helm Helm chart
+//
+// swagger:model helm
+type Helm struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec HelmSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Helm) Kind() string {
+ return "helm"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Helm) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Helm) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec HelmSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types. */
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Helm
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Helm) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec HelmSchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this helm
+func (m *Helm) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Helm) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Helm) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this helm based on the context in which it is used
+func (m *Helm) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Helm) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Helm) UnmarshalBinary(b []byte) error {
+ var res Helm
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go
new file mode 100644
index 00000000000..305a9e16fde
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// HelmSchema Helm Schema
+//
+// # Schema for Helm objects
+//
+// swagger:model helmSchema
+type HelmSchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go
new file mode 100644
index 00000000000..1d52e1e4f56
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go
@@ -0,0 +1,703 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// HelmV001Schema Helm v0.0.1 Schema
+//
+// # Schema for Helm object
+//
+// swagger:model helmV001Schema
+type HelmV001Schema struct {
+
+ // chart
+ // Required: true
+ Chart *HelmV001SchemaChart `json:"chart"`
+
+ // public key
+ // Required: true
+ PublicKey *HelmV001SchemaPublicKey `json:"publicKey"`
+}
+
+// Validate validates this helm v001 schema
+func (m *HelmV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateChart(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HelmV001Schema) validateChart(formats strfmt.Registry) error {
+
+ if err := validate.Required("chart", "body", m.Chart); err != nil {
+ return err
+ }
+
+ if m.Chart != nil {
+ if err := m.Chart.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("chart")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("chart")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HelmV001Schema) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ if m.PublicKey != nil {
+ if err := m.PublicKey.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this helm v001 schema based on the context in which it is used
+func (m *HelmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateChart(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Chart != nil {
+
+ if err := m.Chart.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("chart")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("chart")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HelmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PublicKey != nil {
+
+ if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HelmV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HelmV001Schema) UnmarshalBinary(b []byte) error {
+ var res HelmV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// HelmV001SchemaChart Information about the Helm chart associated with the entry
+//
+// swagger:model HelmV001SchemaChart
+type HelmV001SchemaChart struct {
+
+ // hash
+ Hash *HelmV001SchemaChartHash `json:"hash,omitempty"`
+
+ // provenance
+ // Required: true
+ Provenance *HelmV001SchemaChartProvenance `json:"provenance"`
+}
+
+// Validate validates this helm v001 schema chart
+func (m *HelmV001SchemaChart) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateProvenance(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HelmV001SchemaChart) validateHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if m.Hash != nil {
+ if err := m.Hash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("chart" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("chart" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HelmV001SchemaChart) validateProvenance(formats strfmt.Registry) error {
+
+ if err := validate.Required("chart"+"."+"provenance", "body", m.Provenance); err != nil {
+ return err
+ }
+
+ if m.Provenance != nil {
+ if err := m.Provenance.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("chart" + "." + "provenance")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("chart" + "." + "provenance")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this helm v001 schema chart based on the context in which it is used
+func (m *HelmV001SchemaChart) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateProvenance(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Hash != nil {
+
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if err := m.Hash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("chart" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("chart" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HelmV001SchemaChart) contextValidateProvenance(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Provenance != nil {
+
+ if err := m.Provenance.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("chart" + "." + "provenance")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("chart" + "." + "provenance")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HelmV001SchemaChart) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HelmV001SchemaChart) UnmarshalBinary(b []byte) error {
+ var res HelmV001SchemaChart
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// HelmV001SchemaChartHash Specifies the hash algorithm and value for the chart
+//
+// swagger:model HelmV001SchemaChartHash
+type HelmV001SchemaChartHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the chart
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this helm v001 schema chart hash
+func (m *HelmV001SchemaChartHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var helmV001SchemaChartHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ helmV001SchemaChartHashTypeAlgorithmPropEnum = append(helmV001SchemaChartHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // HelmV001SchemaChartHashAlgorithmSha256 captures enum value "sha256"
+ HelmV001SchemaChartHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *HelmV001SchemaChartHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, helmV001SchemaChartHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *HelmV001SchemaChartHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("chart"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("chart"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *HelmV001SchemaChartHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("chart"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this helm v001 schema chart hash based on the context in which it is used
+func (m *HelmV001SchemaChartHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HelmV001SchemaChartHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HelmV001SchemaChartHash) UnmarshalBinary(b []byte) error {
+ var res HelmV001SchemaChartHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
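The enum plumbing above is what actually rejects unsupported algorithms: the init-populated slice feeds validate.EnumCase, so any value outside `["sha256"]` fails with the full nested field path. A quick standalone sketch of the failure mode:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	alg, val := "sha1", "deadbeef" // "sha1" is outside the ["sha256"] enum
	hash := models.HelmV001SchemaChartHash{Algorithm: &alg, Value: &val}

	// The error is reported against the nested path, e.g.
	// "chart.hash.algorithm in body should be one of [sha256]".
	if err := hash.Validate(strfmt.Default); err != nil {
		fmt.Println(err)
	}
}
```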
+
+// HelmV001SchemaChartProvenance The provenance entry associated with the signed Helm Chart
+//
+// swagger:model HelmV001SchemaChartProvenance
+type HelmV001SchemaChartProvenance struct {
+
+ // Specifies the content of the provenance file inline within the document
+ // Format: byte
+ Content strfmt.Base64 `json:"content,omitempty"`
+
+ // signature
+ Signature *HelmV001SchemaChartProvenanceSignature `json:"signature,omitempty"`
+}
+
+// Validate validates this helm v001 schema chart provenance
+func (m *HelmV001SchemaChartProvenance) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateSignature(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HelmV001SchemaChartProvenance) validateSignature(formats strfmt.Registry) error {
+ if swag.IsZero(m.Signature) { // not required
+ return nil
+ }
+
+ if m.Signature != nil {
+ if err := m.Signature.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("chart" + "." + "provenance" + "." + "signature")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("chart" + "." + "provenance" + "." + "signature")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this helm v001 schema chart provenance based on the context it is used
+func (m *HelmV001SchemaChartProvenance) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateSignature(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HelmV001SchemaChartProvenance) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Signature != nil {
+
+ if swag.IsZero(m.Signature) { // not required
+ return nil
+ }
+
+ if err := m.Signature.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("chart" + "." + "provenance" + "." + "signature")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("chart" + "." + "provenance" + "." + "signature")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HelmV001SchemaChartProvenance) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HelmV001SchemaChartProvenance) UnmarshalBinary(b []byte) error {
+ var res HelmV001SchemaChartProvenance
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// HelmV001SchemaChartProvenanceSignature Information about the included signature in the provenance file
+//
+// swagger:model HelmV001SchemaChartProvenanceSignature
+type HelmV001SchemaChartProvenanceSignature struct {
+
+ // Specifies the signature embedded within the provenance file
+ // Required: true
+ // Read Only: true
+ // Format: byte
+ Content strfmt.Base64 `json:"content"`
+}
+
+// Validate validates this helm v001 schema chart provenance signature
+func (m *HelmV001SchemaChartProvenanceSignature) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HelmV001SchemaChartProvenanceSignature) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this helm v001 schema chart provenance signature based on the context it is used
+func (m *HelmV001SchemaChartProvenanceSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateContent(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HelmV001SchemaChartProvenanceSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
+
+ if err := validate.ReadOnly(ctx, "chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HelmV001SchemaChartProvenanceSignature) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HelmV001SchemaChartProvenanceSignature) UnmarshalBinary(b []byte) error {
+ var res HelmV001SchemaChartProvenanceSignature
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// HelmV001SchemaPublicKey The public key that can verify the package signature
+//
+// swagger:model HelmV001SchemaPublicKey
+type HelmV001SchemaPublicKey struct {
+
+ // Specifies the content of the public key inline within the document
+ // Required: true
+ // Format: byte
+ Content *strfmt.Base64 `json:"content"`
+}
+
+// Validate validates this helm v001 schema public key
+func (m *HelmV001SchemaPublicKey) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HelmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this helm v001 schema public key based on context it is used
+func (m *HelmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HelmV001SchemaPublicKey) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HelmV001SchemaPublicKey) UnmarshalBinary(b []byte) error {
+ var res HelmV001SchemaPublicKey
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
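
The generated helm models above all follow the same go-swagger pattern: required fields are pointers, and `Validate` walks the required/enum constraints against a `strfmt.Registry`. A minimal usage sketch, not part of the vendored code — the digest below is the SHA-256 of the empty string, chosen only as a plausible-looking value, since this schema only requires the field to be present:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	alg := models.HelmV001SchemaChartHashAlgorithmSha256
	// SHA-256 of the empty string; used purely as an example value.
	val := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	hash := &models.HelmV001SchemaChartHash{
		Algorithm: &alg,
		Value:     &val,
	}

	// Validate enforces the required fields and the ["sha256"] enum.
	if err := hash.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid chart hash:", err)
		return
	}
	fmt.Println("chart hash is valid")
}
```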
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go
new file mode 100644
index 00000000000..c555eb2da64
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go
@@ -0,0 +1,153 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// InactiveShardLogInfo inactive shard log info
+//
+// swagger:model InactiveShardLogInfo
+type InactiveShardLogInfo struct {
+
+	// The current hash value stored at the root of the Merkle tree
+ // Required: true
+ // Pattern: ^[0-9a-fA-F]{64}$
+ RootHash *string `json:"rootHash"`
+
+ // The current signed tree head
+ // Required: true
+ SignedTreeHead *string `json:"signedTreeHead"`
+
+ // The current treeID
+ // Required: true
+ // Pattern: ^[0-9]+$
+ TreeID *string `json:"treeID"`
+
+	// The current number of nodes in the Merkle tree
+ // Required: true
+ // Minimum: 1
+ TreeSize *int64 `json:"treeSize"`
+}
+
+// Validate validates this inactive shard log info
+func (m *InactiveShardLogInfo) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateRootHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSignedTreeHead(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateTreeID(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateTreeSize(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *InactiveShardLogInfo) validateRootHash(formats strfmt.Registry) error {
+
+ if err := validate.Required("rootHash", "body", m.RootHash); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *InactiveShardLogInfo) validateSignedTreeHead(formats strfmt.Registry) error {
+
+ if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *InactiveShardLogInfo) validateTreeID(formats strfmt.Registry) error {
+
+ if err := validate.Required("treeID", "body", m.TreeID); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *InactiveShardLogInfo) validateTreeSize(formats strfmt.Registry) error {
+
+ if err := validate.Required("treeSize", "body", m.TreeSize); err != nil {
+ return err
+ }
+
+ if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this inactive shard log info based on context it is used
+func (m *InactiveShardLogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *InactiveShardLogInfo) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *InactiveShardLogInfo) UnmarshalBinary(b []byte) error {
+ var res InactiveShardLogInfo
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
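
As elsewhere in these models, `Validate` on `InactiveShardLogInfo` checks only shape — the 64-hex-character pattern on `rootHash`, the digits-only pattern on `treeID`, and the `Minimum: 1` bound on `treeSize` — not whether the values correspond to a real shard. A sketch with invented values:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	rootHash := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	signedTreeHead := "example signed tree head"
	treeID := "1193050959916656506"
	treeSize := int64(42)

	shard := &models.InactiveShardLogInfo{
		RootHash:       &rootHash,
		SignedTreeHead: &signedTreeHead,
		TreeID:         &treeID,
		TreeSize:       &treeSize,
	}

	// Passes: rootHash is 64 hex chars, treeID is digits, treeSize >= 1.
	if err := shard.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid shard info:", err)
	}
}
```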
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go
new file mode 100644
index 00000000000..86f0d7b94e2
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// InclusionProof inclusion proof
+//
+// swagger:model InclusionProof
+type InclusionProof struct {
+
+ // The checkpoint (signed tree head) that the inclusion proof is based on
+ // Required: true
+ Checkpoint *string `json:"checkpoint"`
+
+ // A list of hashes required to compute the inclusion proof, sorted in order from leaf to root
+ // Required: true
+ Hashes []string `json:"hashes"`
+
+ // The index of the entry in the transparency log
+ // Required: true
+ // Minimum: 0
+ LogIndex *int64 `json:"logIndex"`
+
+	// The hash value stored at the root of the Merkle tree at the time the proof was generated
+ // Required: true
+ // Pattern: ^[0-9a-fA-F]{64}$
+ RootHash *string `json:"rootHash"`
+
+	// The size of the Merkle tree at the time the inclusion proof was generated
+ // Required: true
+ // Minimum: 1
+ TreeSize *int64 `json:"treeSize"`
+}
+
+// Validate validates this inclusion proof
+func (m *InclusionProof) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateCheckpoint(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateHashes(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateLogIndex(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateRootHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateTreeSize(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *InclusionProof) validateCheckpoint(formats strfmt.Registry) error {
+
+ if err := validate.Required("checkpoint", "body", m.Checkpoint); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *InclusionProof) validateHashes(formats strfmt.Registry) error {
+
+ if err := validate.Required("hashes", "body", m.Hashes); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Hashes); i++ {
+
+ if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func (m *InclusionProof) validateLogIndex(formats strfmt.Registry) error {
+
+ if err := validate.Required("logIndex", "body", m.LogIndex); err != nil {
+ return err
+ }
+
+ if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *InclusionProof) validateRootHash(formats strfmt.Registry) error {
+
+ if err := validate.Required("rootHash", "body", m.RootHash); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *InclusionProof) validateTreeSize(formats strfmt.Registry) error {
+
+ if err := validate.Required("treeSize", "body", m.TreeSize); err != nil {
+ return err
+ }
+
+ if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this inclusion proof based on context it is used
+func (m *InclusionProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *InclusionProof) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *InclusionProof) UnmarshalBinary(b []byte) error {
+ var res InclusionProof
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
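
One detail worth noticing in `validateHashes` is that each element of `hashes` is checked individually against the 64-hex-character pattern, with the failing index embedded in the error path (`hashes.N`). A small sketch that deliberately trips that check — every value here is invented:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	checkpoint := "example checkpoint"
	rootHash := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	logIndex, treeSize := int64(0), int64(1)

	proof := &models.InclusionProof{
		Checkpoint: &checkpoint,
		Hashes:     []string{"not-a-hex-digest"}, // violates ^[0-9a-fA-F]{64}$
		LogIndex:   &logIndex,
		RootHash:   &rootHash,
		TreeSize:   &treeSize,
	}

	// Prints a validation error naming the failing element, "hashes.0".
	fmt.Println(proof.Validate(strfmt.Default))
}
```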
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go
new file mode 100644
index 00000000000..4f208de1d55
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Intoto Intoto object
+//
+// swagger:model intoto
+type Intoto struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec IntotoSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Intoto) Kind() string {
+ return "intoto"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Intoto) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Intoto) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec IntotoSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+		/* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Intoto
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Intoto) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec IntotoSchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this intoto
+func (m *Intoto) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Intoto) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Intoto) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto based on the context it is used
+func (m *Intoto) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Intoto) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Intoto) UnmarshalBinary(b []byte) error {
+ var res Intoto
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
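
`UnmarshalJSON` here implements go-swagger's polymorphic decoding: the body is decoded twice, once into the subtype fields and once into the `kind` discriminator, and the entry is rejected with a 422 error when the kind is not `"intoto"`. A sketch of both paths, with invented payloads:

```go
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	good := []byte(`{"kind":"intoto","apiVersion":"0.0.1","spec":{}}`)
	bad := []byte(`{"kind":"rekord","apiVersion":"0.0.1","spec":{}}`)

	var entry models.Intoto
	if err := entry.UnmarshalJSON(good); err != nil {
		fmt.Println("unexpected:", err)
	}

	// The discriminator check rejects a mismatched kind with a 422 error.
	if err := entry.UnmarshalJSON(bad); err != nil {
		fmt.Println(err) // invalid kind value: "rekord"
	}
}
```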
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go
new file mode 100644
index 00000000000..142f0a19429
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// IntotoSchema Intoto Schema
+//
+// # Schema for Intoto objects
+//
+// swagger:model intotoSchema
+type IntotoSchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go
new file mode 100644
index 00000000000..d3a6ca42f4c
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go
@@ -0,0 +1,539 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// IntotoV001Schema intoto v0.0.1 Schema
+//
+// # Schema for intoto object
+//
+// swagger:model intotoV001Schema
+type IntotoV001Schema struct {
+
+ // content
+ // Required: true
+ Content *IntotoV001SchemaContent `json:"content"`
+
+ // The public key that can verify the signature
+ // Required: true
+ // Format: byte
+ PublicKey *strfmt.Base64 `json:"publicKey"`
+}
+
+// Validate validates this intoto v001 schema
+func (m *IntotoV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV001Schema) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("content", "body", m.Content); err != nil {
+ return err
+ }
+
+ if m.Content != nil {
+ if err := m.Content.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *IntotoV001Schema) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto v001 schema based on the context it is used
+func (m *IntotoV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateContent(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV001Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Content != nil {
+
+ if err := m.Content.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV001Schema) UnmarshalBinary(b []byte) error {
+ var res IntotoV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// IntotoV001SchemaContent intoto v001 schema content
+//
+// swagger:model IntotoV001SchemaContent
+type IntotoV001SchemaContent struct {
+
+ // envelope
+ Envelope string `json:"envelope,omitempty"`
+
+ // hash
+ Hash *IntotoV001SchemaContentHash `json:"hash,omitempty"`
+
+ // payload hash
+ PayloadHash *IntotoV001SchemaContentPayloadHash `json:"payloadHash,omitempty"`
+}
+
+// Validate validates this intoto v001 schema content
+func (m *IntotoV001SchemaContent) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePayloadHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV001SchemaContent) validateHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if m.Hash != nil {
+ if err := m.Hash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *IntotoV001SchemaContent) validatePayloadHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if m.PayloadHash != nil {
+ if err := m.PayloadHash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "payloadHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "payloadHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto v001 schema content based on the context it is used
+func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Hash != nil {
+
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if err := m.Hash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *IntotoV001SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PayloadHash != nil {
+
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "payloadHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "payloadHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV001SchemaContent) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV001SchemaContent) UnmarshalBinary(b []byte) error {
+ var res IntotoV001SchemaContent
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// IntotoV001SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope; this is computed by the rekor server, and client-provided values are ignored
+//
+// swagger:model IntotoV001SchemaContentHash
+type IntotoV001SchemaContentHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the archive
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this intoto v001 schema content hash
+func (m *IntotoV001SchemaContentHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var intotoV001SchemaContentHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ intotoV001SchemaContentHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // IntotoV001SchemaContentHashAlgorithmSha256 captures enum value "sha256"
+ IntotoV001SchemaContentHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *IntotoV001SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, intotoV001SchemaContentHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *IntotoV001SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *IntotoV001SchemaContentHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto v001 schema content hash based on the context it is used
+func (m *IntotoV001SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV001SchemaContentHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV001SchemaContentHash) UnmarshalBinary(b []byte) error {
+ var res IntotoV001SchemaContentHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// IntotoV001SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope; this is computed by the rekor server, and client-provided values are ignored
+//
+// swagger:model IntotoV001SchemaContentPayloadHash
+type IntotoV001SchemaContentPayloadHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the envelope's payload
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this intoto v001 schema content payload hash
+func (m *IntotoV001SchemaContentPayloadHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // IntotoV001SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256"
+ IntotoV001SchemaContentPayloadHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *IntotoV001SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto v001 schema content payload hash based on the context it is used
+func (m *IntotoV001SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV001SchemaContentPayloadHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV001SchemaContentPayloadHash) UnmarshalBinary(b []byte) error {
+ var res IntotoV001SchemaContentPayloadHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
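
The enum machinery above — the `init` function populating the `...PropEnum` slice, plus `validate.EnumCase` — means `Validate` rejects any algorithm other than `"sha256"`. A quick sketch of the rejection, with invented values:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	alg := "sha512" // not in the generated ["sha256"] enum
	val := "an example digest"

	hash := &models.IntotoV001SchemaContentHash{
		Algorithm: &alg,
		Value:     &val,
	}

	// validate.EnumCase rejects anything outside ["sha256"].
	fmt.Println(hash.Validate(strfmt.Default))
}
```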
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
new file mode 100644
index 00000000000..4ea4dcc58a5
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
@@ -0,0 +1,798 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// IntotoV002Schema intoto v0.0.2 Schema
+//
+// # Schema for intoto object
+//
+// swagger:model intotoV002Schema
+type IntotoV002Schema struct {
+
+ // content
+ // Required: true
+ Content *IntotoV002SchemaContent `json:"content"`
+}
+
+// Validate validates this intoto v002 schema
+func (m *IntotoV002Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV002Schema) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("content", "body", m.Content); err != nil {
+ return err
+ }
+
+ if m.Content != nil {
+ if err := m.Content.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto v002 schema based on the context it is used
+func (m *IntotoV002Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateContent(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV002Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Content != nil {
+
+ if err := m.Content.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV002Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV002Schema) UnmarshalBinary(b []byte) error {
+ var res IntotoV002Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// IntotoV002SchemaContent intoto v002 schema content
+//
+// swagger:model IntotoV002SchemaContent
+type IntotoV002SchemaContent struct {
+
+ // envelope
+ // Required: true
+ Envelope *IntotoV002SchemaContentEnvelope `json:"envelope"`
+
+ // hash
+ Hash *IntotoV002SchemaContentHash `json:"hash,omitempty"`
+
+ // payload hash
+ PayloadHash *IntotoV002SchemaContentPayloadHash `json:"payloadHash,omitempty"`
+}
+
+// Validate validates this intoto v002 schema content
+func (m *IntotoV002SchemaContent) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEnvelope(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePayloadHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV002SchemaContent) validateEnvelope(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"envelope", "body", m.Envelope); err != nil {
+ return err
+ }
+
+ if m.Envelope != nil {
+ if err := m.Envelope.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "envelope")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "envelope")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContent) validateHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if m.Hash != nil {
+ if err := m.Hash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContent) validatePayloadHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if m.PayloadHash != nil {
+ if err := m.PayloadHash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "payloadHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "payloadHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto v002 schema content based on the context it is used
+func (m *IntotoV002SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateEnvelope(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Envelope != nil {
+
+ if err := m.Envelope.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "envelope")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "envelope")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Hash != nil {
+
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if err := m.Hash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PayloadHash != nil {
+
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "payloadHash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "payloadHash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV002SchemaContent) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV002SchemaContent) UnmarshalBinary(b []byte) error {
+ var res IntotoV002SchemaContent
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// IntotoV002SchemaContentEnvelope dsse envelope
+//
+// swagger:model IntotoV002SchemaContentEnvelope
+type IntotoV002SchemaContentEnvelope struct {
+
+ // payload of the envelope
+ // Format: byte
+ Payload strfmt.Base64 `json:"payload,omitempty"`
+
+ // type describing the payload
+ // Required: true
+ PayloadType *string `json:"payloadType"`
+
+ // collection of all signatures of the envelope's payload
+ // Required: true
+ // Min Items: 1
+ Signatures []*IntotoV002SchemaContentEnvelopeSignaturesItems0 `json:"signatures"`
+}
+
+// Validate validates this intoto v002 schema content envelope
+func (m *IntotoV002SchemaContentEnvelope) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validatePayloadType(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSignatures(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV002SchemaContentEnvelope) validatePayloadType(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"envelope"+"."+"payloadType", "body", m.PayloadType); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContentEnvelope) validateSignatures(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"envelope"+"."+"signatures", "body", m.Signatures); err != nil {
+ return err
+ }
+
+ iSignaturesSize := int64(len(m.Signatures))
+
+ if err := validate.MinItems("content"+"."+"envelope"+"."+"signatures", "body", iSignaturesSize, 1); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Signatures); i++ {
+ if swag.IsZero(m.Signatures[i]) { // not required
+ continue
+ }
+
+ if m.Signatures[i] != nil {
+ if err := m.Signatures[i].Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
+ }
+
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto v002 schema content envelope based on the context it is used
+func (m *IntotoV002SchemaContentEnvelope) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateSignatures(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV002SchemaContentEnvelope) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error {
+
+ for i := 0; i < len(m.Signatures); i++ {
+
+ if m.Signatures[i] != nil {
+
+			if swag.IsZero(m.Signatures[i]) { // not required
+				continue
+			}
+
+ if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
+ }
+
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV002SchemaContentEnvelope) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV002SchemaContentEnvelope) UnmarshalBinary(b []byte) error {
+ var res IntotoV002SchemaContentEnvelope
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// IntotoV002SchemaContentEnvelopeSignaturesItems0 a signature of the envelope's payload along with the public key for the signature
+//
+// swagger:model IntotoV002SchemaContentEnvelopeSignaturesItems0
+type IntotoV002SchemaContentEnvelopeSignaturesItems0 struct {
+
+ // optional id of the key used to create the signature
+ Keyid string `json:"keyid,omitempty"`
+
+ // public key that corresponds to this signature
+ // Required: true
+ // Format: byte
+ PublicKey *strfmt.Base64 `json:"publicKey"`
+
+ // signature of the payload
+ // Required: true
+ // Format: byte
+ Sig *strfmt.Base64 `json:"sig"`
+}
+
+// Validate validates this intoto v002 schema content envelope signatures items0
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSig(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validateSig(formats strfmt.Registry) error {
+
+ if err := validate.Required("sig", "body", m.Sig); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this intoto v002 schema content envelope signatures items0 based on context it is used
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) UnmarshalBinary(b []byte) error {
+ var res IntotoV002SchemaContentEnvelopeSignaturesItems0
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// IntotoV002SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope
+//
+// swagger:model IntotoV002SchemaContentHash
+type IntotoV002SchemaContentHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the archive
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this intoto v002 schema content hash
+func (m *IntotoV002SchemaContentHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var intotoV002SchemaContentHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ intotoV002SchemaContentHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // IntotoV002SchemaContentHashAlgorithmSha256 captures enum value "sha256"
+ IntotoV002SchemaContentHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *IntotoV002SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, intotoV002SchemaContentHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *IntotoV002SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContentHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto v002 schema content hash based on the context it is used
+func (m *IntotoV002SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV002SchemaContentHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV002SchemaContentHash) UnmarshalBinary(b []byte) error {
+ var res IntotoV002SchemaContentHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// IntotoV002SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope
+//
+// swagger:model IntotoV002SchemaContentPayloadHash
+type IntotoV002SchemaContentPayloadHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value of the payload
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this intoto v002 schema content payload hash
+func (m *IntotoV002SchemaContentPayloadHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // IntotoV002SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256"
+ IntotoV002SchemaContentPayloadHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this intoto v002 schema content payload hash based on the context it is used
+func (m *IntotoV002SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *IntotoV002SchemaContentPayloadHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *IntotoV002SchemaContentPayloadHash) UnmarshalBinary(b []byte) error {
+ var res IntotoV002SchemaContentPayloadHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go
new file mode 100644
index 00000000000..3df3d21b8a4
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Jar Java Archive (JAR)
+//
+// swagger:model jar
+type Jar struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec JarSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Jar) Kind() string {
+ return "jar"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Jar) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Jar) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec JarSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
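+ // Decode the same raw bytes a second time, this time only to read the
+ // kind discriminator, which must match this subtype's kind ("jar").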
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Jar
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Jar) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
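+ // b3 is never populated; swag.ConcatJSON skips empty segments and merges
+ // the subtype fields and the kind discriminator into a single JSON object.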
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec JarSchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this jar
+func (m *Jar) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Jar) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Jar) validateSpec(formats strfmt.Registry) error {
+
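+ // JarSchema is an untyped any, so only presence can be checked here;
+ // deeper validation is left to the version-specific schema types.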
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validate this jar based on the context it is used
+func (m *Jar) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Jar) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Jar) UnmarshalBinary(b []byte) error {
+ var res Jar
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go
new file mode 100644
index 00000000000..0cd3126ef51
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// JarSchema JAR Schema
+//
+// # Schema for JAR objects
+//
+// swagger:model jarSchema
+type JarSchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go
new file mode 100644
index 00000000000..64335c36879
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go
@@ -0,0 +1,602 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// JarV001Schema JAR v0.0.1 Schema
+//
+// # Schema for JAR entries
+//
+// swagger:model jarV001Schema
+type JarV001Schema struct {
+
+ // archive
+ // Required: true
+ Archive *JarV001SchemaArchive `json:"archive"`
+
+ // signature
+ Signature *JarV001SchemaSignature `json:"signature,omitempty"`
+}
+
+// Validate validates this jar v001 schema
+func (m *JarV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateArchive(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSignature(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *JarV001Schema) validateArchive(formats strfmt.Registry) error {
+
+ if err := validate.Required("archive", "body", m.Archive); err != nil {
+ return err
+ }
+
+ if m.Archive != nil {
+ if err := m.Archive.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("archive")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("archive")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *JarV001Schema) validateSignature(formats strfmt.Registry) error {
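+ // swag.IsZero treats a nil pointer or zero value as absent, so this
+ // optional field is only validated when it was actually supplied.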
+ if swag.IsZero(m.Signature) { // not required
+ return nil
+ }
+
+ if m.Signature != nil {
+ if err := m.Signature.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this jar v001 schema based on the context it is used
+func (m *JarV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateArchive(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSignature(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Archive != nil {
+
+ if err := m.Archive.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("archive")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("archive")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *JarV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Signature != nil {
+
+ if swag.IsZero(m.Signature) { // not required
+ return nil
+ }
+
+ if err := m.Signature.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *JarV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *JarV001Schema) UnmarshalBinary(b []byte) error {
+ var res JarV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// JarV001SchemaArchive Information about the archive associated with the entry
+//
+// swagger:model JarV001SchemaArchive
+type JarV001SchemaArchive struct {
+
+ // Specifies the archive inline within the document
+ // Format: byte
+ Content strfmt.Base64 `json:"content,omitempty"`
+
+ // hash
+ Hash *JarV001SchemaArchiveHash `json:"hash,omitempty"`
+}
+
+// Validate validates this jar v001 schema archive
+func (m *JarV001SchemaArchive) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *JarV001SchemaArchive) validateHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if m.Hash != nil {
+ if err := m.Hash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("archive" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("archive" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this jar v001 schema archive based on the context it is used
+func (m *JarV001SchemaArchive) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *JarV001SchemaArchive) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Hash != nil {
+
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if err := m.Hash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("archive" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("archive" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *JarV001SchemaArchive) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *JarV001SchemaArchive) UnmarshalBinary(b []byte) error {
+ var res JarV001SchemaArchive
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// JarV001SchemaArchiveHash Specifies the hash algorithm and value encompassing the entire signed archive
+//
+// swagger:model JarV001SchemaArchiveHash
+type JarV001SchemaArchiveHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the archive
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this jar v001 schema archive hash
+func (m *JarV001SchemaArchiveHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var jarV001SchemaArchiveHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ jarV001SchemaArchiveHashTypeAlgorithmPropEnum = append(jarV001SchemaArchiveHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // JarV001SchemaArchiveHashAlgorithmSha256 captures enum value "sha256"
+ JarV001SchemaArchiveHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *JarV001SchemaArchiveHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, jarV001SchemaArchiveHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *JarV001SchemaArchiveHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("archive"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("archive"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *JarV001SchemaArchiveHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("archive"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this jar v001 schema archive hash based on context it is used
+func (m *JarV001SchemaArchiveHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *JarV001SchemaArchiveHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *JarV001SchemaArchiveHash) UnmarshalBinary(b []byte) error {
+ var res JarV001SchemaArchiveHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// JarV001SchemaSignature Information about the included signature in the JAR file
+//
+// swagger:model JarV001SchemaSignature
+type JarV001SchemaSignature struct {
+
+ // Specifies the PKCS7 signature embedded within the JAR file
+ // Required: true
+ // Read Only: true
+ // Format: byte
+ Content strfmt.Base64 `json:"content"`
+
+ // public key
+ // Required: true
+ PublicKey *JarV001SchemaSignaturePublicKey `json:"publicKey"`
+}
+
+// Validate validates this jar v001 schema signature
+func (m *JarV001SchemaSignature) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *JarV001SchemaSignature) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *JarV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ if m.PublicKey != nil {
+ if err := m.PublicKey.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature" + "." + "publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature" + "." + "publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this jar v001 schema signature based on the context it is used
+func (m *JarV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateContent(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *JarV001SchemaSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
+
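+ // The content field is read-only (server-populated); validate.ReadOnly
+ // rejects it when the context marks the payload as a client request.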
+ if err := validate.ReadOnly(ctx, "signature"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *JarV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PublicKey != nil {
+
+ if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature" + "." + "publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature" + "." + "publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *JarV001SchemaSignature) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *JarV001SchemaSignature) UnmarshalBinary(b []byte) error {
+ var res JarV001SchemaSignature
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// JarV001SchemaSignaturePublicKey The X509 certificate containing the public key that verifies the signature of the JAR
+//
+// swagger:model JarV001SchemaSignaturePublicKey
+type JarV001SchemaSignaturePublicKey struct {
+
+ // Specifies the content of the X509 certificate containing the public key used to verify the signature
+ // Required: true
+ // Format: byte
+ Content *strfmt.Base64 `json:"content"`
+}
+
+// Validate validates this jar v001 schema signature public key
+func (m *JarV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *JarV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this jar v001 schema signature public key based on the context it is used
+func (m *JarV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *JarV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *JarV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error {
+ var res JarV001SchemaSignaturePublicKey
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go
new file mode 100644
index 00000000000..65cf4f4b77e
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go
@@ -0,0 +1,474 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// LogEntry log entry
+//
+// swagger:model LogEntry
+type LogEntry map[string]LogEntryAnon
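+// Each key is the UUID of a log entry as returned by the Rekor API, mapped
+// to that entry's contents.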
+
+// Validate validates this log entry
+func (m LogEntry) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ for k := range m {
+
+ if swag.IsZero(m[k]) { // not required
+ continue
+ }
+ if val, ok := m[k]; ok {
+ if err := val.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName(k)
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName(k)
+ }
+
+ return err
+ }
+ }
+
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// ContextValidate validate this log entry based on the context it is used
+func (m LogEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ for k := range m {
+
+ if val, ok := m[k]; ok {
+ if err := val.ContextValidate(ctx, formats); err != nil {
+ return err
+ }
+ }
+
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// LogEntryAnon log entry anon
+//
+// swagger:model LogEntryAnon
+type LogEntryAnon struct {
+
+ // attestation
+ Attestation *LogEntryAnonAttestation `json:"attestation,omitempty"`
+
+ // body
+ // Required: true
+ Body any `json:"body"`
+
+ // The time the entry was added to the log as a Unix timestamp in seconds
+ // Required: true
+ IntegratedTime *int64 `json:"integratedTime"`
+
+ // This is the SHA256 hash of the DER-encoded public key for the log at the time the entry was included in the log
+ // Required: true
+ // Pattern: ^[0-9a-fA-F]{64}$
+ LogID *string `json:"logID"`
+
+ // log index
+ // Required: true
+ // Minimum: 0
+ LogIndex *int64 `json:"logIndex"`
+
+ // verification
+ Verification *LogEntryAnonVerification `json:"verification,omitempty"`
+}
+
+// Validate validates this log entry anon
+func (m *LogEntryAnon) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAttestation(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateBody(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateIntegratedTime(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateLogID(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateLogIndex(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateVerification(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *LogEntryAnon) validateAttestation(formats strfmt.Registry) error {
+ if swag.IsZero(m.Attestation) { // not required
+ return nil
+ }
+
+ if m.Attestation != nil {
+ if err := m.Attestation.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("attestation")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("attestation")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *LogEntryAnon) validateBody(formats strfmt.Registry) error {
+
+ if m.Body == nil {
+ return errors.Required("body", "body", nil)
+ }
+
+ return nil
+}
+
+func (m *LogEntryAnon) validateIntegratedTime(formats strfmt.Registry) error {
+
+ if err := validate.Required("integratedTime", "body", m.IntegratedTime); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *LogEntryAnon) validateLogID(formats strfmt.Registry) error {
+
+ if err := validate.Required("logID", "body", m.LogID); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("logID", "body", *m.LogID, `^[0-9a-fA-F]{64}$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *LogEntryAnon) validateLogIndex(formats strfmt.Registry) error {
+
+ if err := validate.Required("logIndex", "body", m.LogIndex); err != nil {
+ return err
+ }
+
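+ // The trailing false makes the minimum inclusive: logIndex must be >= 0.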
+ if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *LogEntryAnon) validateVerification(formats strfmt.Registry) error {
+ if swag.IsZero(m.Verification) { // not required
+ return nil
+ }
+
+ if m.Verification != nil {
+ if err := m.Verification.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("verification")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("verification")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this log entry anon based on the context it is used
+func (m *LogEntryAnon) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateAttestation(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateVerification(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Attestation != nil {
+
+ if swag.IsZero(m.Attestation) { // not required
+ return nil
+ }
+
+ if err := m.Attestation.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("attestation")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("attestation")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *LogEntryAnon) contextValidateVerification(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Verification != nil {
+
+ if swag.IsZero(m.Verification) { // not required
+ return nil
+ }
+
+ if err := m.Verification.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("verification")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("verification")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *LogEntryAnon) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *LogEntryAnon) UnmarshalBinary(b []byte) error {
+ var res LogEntryAnon
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// LogEntryAnonAttestation log entry anon attestation
+//
+// swagger:model LogEntryAnonAttestation
+type LogEntryAnonAttestation struct {
+
+ // data
+ // Format: byte
+ Data strfmt.Base64 `json:"data,omitempty"`
+}
+
+// Validate validates this log entry anon attestation
+func (m *LogEntryAnonAttestation) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this log entry anon attestation based on context it is used
+func (m *LogEntryAnonAttestation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *LogEntryAnonAttestation) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *LogEntryAnonAttestation) UnmarshalBinary(b []byte) error {
+ var res LogEntryAnonAttestation
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// LogEntryAnonVerification log entry anon verification
+//
+// swagger:model LogEntryAnonVerification
+type LogEntryAnonVerification struct {
+
+ // inclusion proof
+ InclusionProof *InclusionProof `json:"inclusionProof,omitempty"`
+
+ // Signature over the logID, logIndex, body and integratedTime.
+ // Format: byte
+ SignedEntryTimestamp strfmt.Base64 `json:"signedEntryTimestamp,omitempty"`
+}
+
+// Validate validates this log entry anon verification
+func (m *LogEntryAnonVerification) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateInclusionProof(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *LogEntryAnonVerification) validateInclusionProof(formats strfmt.Registry) error {
+ if swag.IsZero(m.InclusionProof) { // not required
+ return nil
+ }
+
+ if m.InclusionProof != nil {
+ if err := m.InclusionProof.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("verification" + "." + "inclusionProof")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("verification" + "." + "inclusionProof")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this log entry anon verification based on the context it is used
+func (m *LogEntryAnonVerification) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateInclusionProof(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *LogEntryAnonVerification) contextValidateInclusionProof(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.InclusionProof != nil {
+
+ if swag.IsZero(m.InclusionProof) { // not required
+ return nil
+ }
+
+ if err := m.InclusionProof.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("verification" + "." + "inclusionProof")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("verification" + "." + "inclusionProof")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *LogEntryAnonVerification) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *LogEntryAnonVerification) UnmarshalBinary(b []byte) error {
+ var res LogEntryAnonVerification
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go
new file mode 100644
index 00000000000..6cbb9d64a2f
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go
@@ -0,0 +1,230 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ stderrors "errors"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// LogInfo log info
+//
+// swagger:model LogInfo
+type LogInfo struct {
+
+ // inactive shards
+ InactiveShards []*InactiveShardLogInfo `json:"inactiveShards"`
+
+ // The current hash value stored at the root of the merkle tree
+ // Required: true
+ // Pattern: ^[0-9a-fA-F]{64}$
+ RootHash *string `json:"rootHash"`
+
+ // The current signed tree head
+ // Required: true
+ SignedTreeHead *string `json:"signedTreeHead"`
+
+ // The current treeID
+ // Required: true
+ // Pattern: ^[0-9]+$
+ TreeID *string `json:"treeID"`
+
+ // The current number of nodes in the merkle tree
+ // Required: true
+ // Minimum: 1
+ TreeSize *int64 `json:"treeSize"`
+}
+
+// Validate validates this log info
+func (m *LogInfo) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateInactiveShards(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateRootHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSignedTreeHead(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateTreeID(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateTreeSize(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *LogInfo) validateInactiveShards(formats strfmt.Registry) error {
+ if swag.IsZero(m.InactiveShards) { // not required
+ return nil
+ }
+
+ for i := 0; i < len(m.InactiveShards); i++ {
+ if swag.IsZero(m.InactiveShards[i]) { // not required
+ continue
+ }
+
+ if m.InactiveShards[i] != nil {
+ if err := m.InactiveShards[i].Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i))
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("inactiveShards" + "." + strconv.Itoa(i))
+ }
+
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+func (m *LogInfo) validateRootHash(formats strfmt.Registry) error {
+
+ if err := validate.Required("rootHash", "body", m.RootHash); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *LogInfo) validateSignedTreeHead(formats strfmt.Registry) error {
+
+ if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *LogInfo) validateTreeID(formats strfmt.Registry) error {
+
+ if err := validate.Required("treeID", "body", m.TreeID); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *LogInfo) validateTreeSize(formats strfmt.Registry) error {
+
+ if err := validate.Required("treeSize", "body", m.TreeSize); err != nil {
+ return err
+ }
+
+ if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this log info based on the context it is used
+func (m *LogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateInactiveShards(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *LogInfo) contextValidateInactiveShards(ctx context.Context, formats strfmt.Registry) error {
+
+ for i := 0; i < len(m.InactiveShards); i++ {
+
+ if m.InactiveShards[i] != nil {
+
+ // Skip zero-valued shards instead of returning early, so the remaining
+ // shards in the slice are still context-validated.
+ if swag.IsZero(m.InactiveShards[i]) { // not required
+ continue
+ }
+
+ if err := m.InactiveShards[i].ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i))
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("inactiveShards" + "." + strconv.Itoa(i))
+ }
+
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *LogInfo) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *LogInfo) UnmarshalBinary(b []byte) error {
+ var res LogInfo
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
new file mode 100644
index 00000000000..5b734a5fff3
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
@@ -0,0 +1,195 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/validate"
+)
+
+// ProposedEntry proposed entry
+//
+// swagger:discriminator ProposedEntry kind
+type ProposedEntry interface {
+ runtime.Validatable
+ runtime.ContextValidatable
+
+ // kind
+ // Required: true
+ Kind() string
+ SetKind(string)
+
+ // AdditionalProperties in base type should be handled just like regular properties
+ // At this moment, the base type property is pushed down to the subtype
+}
+
+type proposedEntry struct {
+ kindField string
+}
+
+// Kind gets the kind of this polymorphic type
+func (m *proposedEntry) Kind() string {
+ return "ProposedEntry"
+}
+
+// SetKind sets the kind of this polymorphic type
+func (m *proposedEntry) SetKind(val string) {
+}
+
+// UnmarshalProposedEntrySlice unmarshals polymorphic slices of ProposedEntry
+func UnmarshalProposedEntrySlice(reader io.Reader, consumer runtime.Consumer) ([]ProposedEntry, error) {
+ var elements []json.RawMessage
+ if err := consumer.Consume(reader, &elements); err != nil {
+ return nil, err
+ }
+
+ var result []ProposedEntry
+ for _, element := range elements {
+ obj, err := unmarshalProposedEntry(element, consumer)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, obj)
+ }
+ return result, nil
+}
+
+// UnmarshalProposedEntry unmarshals polymorphic ProposedEntry
+func UnmarshalProposedEntry(reader io.Reader, consumer runtime.Consumer) (ProposedEntry, error) {
+ // we need to read this twice, so first into a buffer
+ data, err := io.ReadAll(reader)
+ if err != nil {
+ return nil, err
+ }
+ return unmarshalProposedEntry(data, consumer)
+}
+
+func unmarshalProposedEntry(data []byte, consumer runtime.Consumer) (ProposedEntry, error) {
+ buf := bytes.NewBuffer(data)
+ buf2 := bytes.NewBuffer(data)
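+ // buf is drained below while sniffing the kind discriminator, so buf2
+ // keeps an untouched copy of the bytes for decoding the concrete type.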
+
+ // the first time this is read is to fetch the value of the kind property.
+ var getType struct {
+ Kind string `json:"kind"`
+ }
+ if err := consumer.Consume(buf, &getType); err != nil {
+ return nil, err
+ }
+
+ if err := validate.RequiredString("kind", "body", getType.Kind); err != nil {
+ return nil, err
+ }
+
+ // The value of kind is used to determine which type to create and unmarshal the data into
+ switch getType.Kind {
+ case "ProposedEntry":
+ var result proposedEntry
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "alpine":
+ var result Alpine
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "cose":
+ var result Cose
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "dsse":
+ var result DSSE
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "hashedrekord":
+ var result Hashedrekord
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "helm":
+ var result Helm
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "intoto":
+ var result Intoto
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "jar":
+ var result Jar
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "rekord":
+ var result Rekord
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "rfc3161":
+ var result Rfc3161
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "rpm":
+ var result Rpm
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ case "tuf":
+ var result TUF
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+ }
+ return nil, errors.New(422, "invalid kind value: %q", getType.Kind)
+}
+
+// Validate validates this proposed entry
+func (m *proposedEntry) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this proposed entry based on context it is used
+func (m *proposedEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go
new file mode 100644
index 00000000000..81c8ff05454
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Rekord Rekord object
+//
+// swagger:model rekord
+type Rekord struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec RekordSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Rekord) Kind() string {
+ return "rekord"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Rekord) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Rekord) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec RekordSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Rekord
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Rekord) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec RekordSchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this rekord
+func (m *Rekord) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Rekord) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Rekord) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validate this rekord based on the context it is used
+func (m *Rekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rekord) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rekord) UnmarshalBinary(b []byte) error {
+ var res Rekord
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go
new file mode 100644
index 00000000000..9c33e4044e9
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// RekordSchema Rekor Schema
+//
+// # Schema for Rekord objects
+//
+// swagger:model rekordSchema
+type RekordSchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go
new file mode 100644
index 00000000000..0f4977ca736
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go
@@ -0,0 +1,644 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// RekordV001Schema Rekor v0.0.1 Schema
+//
+// # Schema for Rekord object
+//
+// swagger:model rekordV001Schema
+type RekordV001Schema struct {
+
+ // data
+ // Required: true
+ Data *RekordV001SchemaData `json:"data"`
+
+ // signature
+ // Required: true
+ Signature *RekordV001SchemaSignature `json:"signature"`
+}
+
+// Validate validates this rekord v001 schema
+func (m *RekordV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateData(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSignature(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RekordV001Schema) validateData(formats strfmt.Registry) error {
+
+ if err := validate.Required("data", "body", m.Data); err != nil {
+ return err
+ }
+
+ if m.Data != nil {
+ if err := m.Data.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *RekordV001Schema) validateSignature(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature", "body", m.Signature); err != nil {
+ return err
+ }
+
+ if m.Signature != nil {
+ if err := m.Signature.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this rekord v001 schema based on the context it is used
+func (m *RekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateData(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSignature(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Data != nil {
+
+ if err := m.Data.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *RekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Signature != nil {
+
+ if err := m.Signature.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001Schema) UnmarshalBinary(b []byte) error {
+ var res RekordV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// RekordV001SchemaData Information about the content associated with the entry
+//
+// swagger:model RekordV001SchemaData
+type RekordV001SchemaData struct {
+
+ // Specifies the content inline within the document
+ // Format: byte
+ Content strfmt.Base64 `json:"content,omitempty"`
+
+ // hash
+ Hash *RekordV001SchemaDataHash `json:"hash,omitempty"`
+}
+
+// Validate validates this rekord v001 schema data
+func (m *RekordV001SchemaData) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RekordV001SchemaData) validateHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if m.Hash != nil {
+ if err := m.Hash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rekord v001 schema data based on the context it is used in
+func (m *RekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Hash != nil {
+
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if err := m.Hash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("data" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("data" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001SchemaData) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001SchemaData) UnmarshalBinary(b []byte) error {
+ var res RekordV001SchemaData
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// RekordV001SchemaDataHash Specifies the hash algorithm and value for the content
+//
+// swagger:model RekordV001SchemaDataHash
+type RekordV001SchemaDataHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the content
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this rekord v001 schema data hash
+func (m *RekordV001SchemaDataHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var rekordV001SchemaDataHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ rekordV001SchemaDataHashTypeAlgorithmPropEnum = append(rekordV001SchemaDataHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // RekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256"
+ RekordV001SchemaDataHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *RekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, rekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *RekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *RekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rekord v001 schema data hash based on the context it is used in
+func (m *RekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001SchemaDataHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001SchemaDataHash) UnmarshalBinary(b []byte) error {
+ var res RekordV001SchemaDataHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// RekordV001SchemaSignature Information about the detached signature associated with the entry
+//
+// swagger:model RekordV001SchemaSignature
+type RekordV001SchemaSignature struct {
+
+ // Specifies the content of the signature inline within the document
+ // Required: true
+ // Format: byte
+ Content *strfmt.Base64 `json:"content"`
+
+ // Specifies the format of the signature
+ // Required: true
+ // Enum: ["pgp","minisign","x509","ssh"]
+ Format *string `json:"format"`
+
+ // public key
+ // Required: true
+ PublicKey *RekordV001SchemaSignaturePublicKey `json:"publicKey"`
+}
+
+// Validate validates this rekord v001 schema signature
+func (m *RekordV001SchemaSignature) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateFormat(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RekordV001SchemaSignature) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+var rekordV001SchemaSignatureTypeFormatPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["pgp","minisign","x509","ssh"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ rekordV001SchemaSignatureTypeFormatPropEnum = append(rekordV001SchemaSignatureTypeFormatPropEnum, v)
+ }
+}
+
+const (
+
+ // RekordV001SchemaSignatureFormatPgp captures enum value "pgp"
+ RekordV001SchemaSignatureFormatPgp string = "pgp"
+
+ // RekordV001SchemaSignatureFormatMinisign captures enum value "minisign"
+ RekordV001SchemaSignatureFormatMinisign string = "minisign"
+
+ // RekordV001SchemaSignatureFormatX509 captures enum value "x509"
+ RekordV001SchemaSignatureFormatX509 string = "x509"
+
+ // RekordV001SchemaSignatureFormatSSH captures enum value "ssh"
+ RekordV001SchemaSignatureFormatSSH string = "ssh"
+)
+
+// prop value enum
+func (m *RekordV001SchemaSignature) validateFormatEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, rekordV001SchemaSignatureTypeFormatPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *RekordV001SchemaSignature) validateFormat(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature"+"."+"format", "body", m.Format); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateFormatEnum("signature"+"."+"format", "body", *m.Format); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *RekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ if m.PublicKey != nil {
+ if err := m.PublicKey.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature" + "." + "publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature" + "." + "publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rekord v001 schema signature based on the context it is used in
+func (m *RekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PublicKey != nil {
+
+ if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("signature" + "." + "publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("signature" + "." + "publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001SchemaSignature) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001SchemaSignature) UnmarshalBinary(b []byte) error {
+ var res RekordV001SchemaSignature
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// RekordV001SchemaSignaturePublicKey The public key that can verify the signature
+//
+// swagger:model RekordV001SchemaSignaturePublicKey
+type RekordV001SchemaSignaturePublicKey struct {
+
+ // Specifies the content of the public key inline within the document
+ // Required: true
+ // Format: byte
+ Content *strfmt.Base64 `json:"content"`
+}
+
+// Validate validates this rekord v001 schema signature public key
+func (m *RekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RekordV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rekord v001 schema signature public key based on the context it is used in
+func (m *RekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error {
+ var res RekordV001SchemaSignaturePublicKey
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
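
The rekord model above follows the standard go-swagger pattern: required fields are pointers, `Validate` walks each field against a format registry, and enum guards are seeded in `init()`. A minimal sketch of how a caller might exercise it, not part of the vendored file; the signature, key, and digest values are placeholders, not real entry data:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	sig := strfmt.Base64("fake-signature-bytes")
	key := strfmt.Base64("fake-public-key-bytes")

	entry := models.RekordV001Schema{
		Data: &models.RekordV001SchemaData{
			Hash: &models.RekordV001SchemaDataHash{
				// Only "sha256" passes the enum guard.
				Algorithm: swag.String(models.RekordV001SchemaDataHashAlgorithmSha256),
				Value:     swag.String("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
			},
		},
		Signature: &models.RekordV001SchemaSignature{
			Content:   &sig,
			Format:    swag.String(models.RekordV001SchemaSignatureFormatX509),
			PublicKey: &models.RekordV001SchemaSignaturePublicKey{Content: &key},
		},
	}

	// strfmt.Default carries the built-in format validators (byte, email, uri, ...).
	if err := entry.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid entry:", err)
		return
	}
	fmt.Println("entry is structurally valid")
}
```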
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go
new file mode 100644
index 00000000000..ef8d42e7a2d
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Rfc3161 RFC3161 Timestamp
+//
+// swagger:model rfc3161
+type Rfc3161 struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec Rfc3161Schema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Rfc3161) Kind() string {
+ return "rfc3161"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Rfc3161) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Rfc3161) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec Rfc3161Schema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Rfc3161
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Rfc3161) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec Rfc3161Schema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this rfc3161
+func (m *Rfc3161) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Rfc3161) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Rfc3161) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rfc3161 based on the context it is used in
+func (m *Rfc3161) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rfc3161) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rfc3161) UnmarshalBinary(b []byte) error {
+ var res Rfc3161
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
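
The `UnmarshalJSON` above decodes the payload twice: once into the typed fields and once into just the `kind` discriminator, rejecting any document whose kind is not `rfc3161`. A short sketch of that guard in action, assuming it runs inside this module so the vendored package resolves; the payloads are illustrative:

```go
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	var entry models.Rfc3161

	// Matching kind: decodes into APIVersion and Spec.
	ok := []byte(`{"kind":"rfc3161","apiVersion":"0.0.1","spec":{"tsr":{"content":"aGVsbG8="}}}`)
	if err := entry.UnmarshalJSON(ok); err != nil {
		fmt.Println("unexpected:", err)
	}

	// Mismatched kind: rejected with a 422 "invalid kind value" error.
	bad := []byte(`{"kind":"rekord","apiVersion":"0.0.1","spec":{}}`)
	if err := entry.UnmarshalJSON(bad); err != nil {
		fmt.Println("rejected:", err)
	}
}
```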
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go
new file mode 100644
index 00000000000..319358d400c
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Rfc3161Schema Timestamp Schema
+//
+// # Schema for RFC 3161 timestamp objects
+//
+// swagger:model rfc3161Schema
+type Rfc3161Schema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go
new file mode 100644
index 00000000000..c2037cd7ade
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go
@@ -0,0 +1,192 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Rfc3161V001Schema Timestamp v0.0.1 Schema
+//
+// # Schema for RFC3161 entries
+//
+// swagger:model rfc3161V001Schema
+type Rfc3161V001Schema struct {
+
+ // tsr
+ // Required: true
+ Tsr *Rfc3161V001SchemaTsr `json:"tsr"`
+}
+
+// Validate validates this rfc3161 v001 schema
+func (m *Rfc3161V001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateTsr(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Rfc3161V001Schema) validateTsr(formats strfmt.Registry) error {
+
+ if err := validate.Required("tsr", "body", m.Tsr); err != nil {
+ return err
+ }
+
+ if m.Tsr != nil {
+ if err := m.Tsr.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("tsr")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("tsr")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rfc3161 v001 schema based on the context it is used in
+func (m *Rfc3161V001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateTsr(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Rfc3161V001Schema) contextValidateTsr(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Tsr != nil {
+
+ if err := m.Tsr.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("tsr")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("tsr")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rfc3161V001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rfc3161V001Schema) UnmarshalBinary(b []byte) error {
+ var res Rfc3161V001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// Rfc3161V001SchemaTsr Information about the tsr file associated with the entry
+//
+// swagger:model Rfc3161V001SchemaTsr
+type Rfc3161V001SchemaTsr struct {
+
+ // Specifies the tsr file content inline within the document
+ // Required: true
+ // Format: byte
+ Content *strfmt.Base64 `json:"content"`
+}
+
+// Validate validates this rfc3161 v001 schema tsr
+func (m *Rfc3161V001SchemaTsr) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Rfc3161V001SchemaTsr) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("tsr"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rfc3161 v001 schema tsr based on the context it is used in
+func (m *Rfc3161V001SchemaTsr) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rfc3161V001SchemaTsr) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rfc3161V001SchemaTsr) UnmarshalBinary(b []byte) error {
+ var res Rfc3161V001SchemaTsr
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
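
Every model also implements the runtime's `MarshalBinary`/`UnmarshalBinary` pair as thin wrappers over swag's JSON helpers, so a round-trip preserves the payload. A tiny sketch using the tsr wrapper above; the content bytes are fabricated:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	content := strfmt.Base64("fake-tsr-bytes")
	in := models.Rfc3161V001SchemaTsr{Content: &content}

	// MarshalBinary serializes via swag.WriteJSON under the hood.
	raw, err := in.MarshalBinary()
	if err != nil {
		panic(err)
	}

	var out models.Rfc3161V001SchemaTsr
	if err := out.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Println(string(*out.Content)) // "fake-tsr-bytes"
}
```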
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go
new file mode 100644
index 00000000000..8b1f10c77e6
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Rpm RPM package
+//
+// swagger:model rpm
+type Rpm struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec RpmSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Rpm) Kind() string {
+ return "rpm"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Rpm) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Rpm) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec RpmSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Rpm
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m Rpm) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec RpmSchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this rpm
+func (m *Rpm) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *Rpm) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Rpm) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rpm based on the context it is used in
+func (m *Rpm) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *Rpm) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *Rpm) UnmarshalBinary(b []byte) error {
+ var res Rpm
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go
new file mode 100644
index 00000000000..2520dfb9c78
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// RpmSchema RPM Schema
+//
+// # Schema for RPM objects
+//
+// swagger:model rpmSchema
+type RpmSchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go
new file mode 100644
index 00000000000..a7636bd5fcd
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go
@@ -0,0 +1,475 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// RpmV001Schema RPM v0.0.1 Schema
+//
+// # Schema for RPM entries
+//
+// swagger:model rpmV001Schema
+type RpmV001Schema struct {
+
+ // package
+ // Required: true
+ Package *RpmV001SchemaPackage `json:"package"`
+
+ // public key
+ // Required: true
+ PublicKey *RpmV001SchemaPublicKey `json:"publicKey"`
+}
+
+// Validate validates this rpm v001 schema
+func (m *RpmV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validatePackage(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RpmV001Schema) validatePackage(formats strfmt.Registry) error {
+
+ if err := validate.Required("package", "body", m.Package); err != nil {
+ return err
+ }
+
+ if m.Package != nil {
+ if err := m.Package.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("package")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("package")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *RpmV001Schema) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ if m.PublicKey != nil {
+ if err := m.PublicKey.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rpm v001 schema based on the context it is used in
+func (m *RpmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidatePackage(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Package != nil {
+
+ if err := m.Package.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("package")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("package")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *RpmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PublicKey != nil {
+
+ if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RpmV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RpmV001Schema) UnmarshalBinary(b []byte) error {
+ var res RpmV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// RpmV001SchemaPackage Information about the package associated with the entry
+//
+// swagger:model RpmV001SchemaPackage
+type RpmV001SchemaPackage struct {
+
+ // Specifies the package inline within the document
+ // Format: byte
+ Content strfmt.Base64 `json:"content,omitempty"`
+
+ // hash
+ Hash *RpmV001SchemaPackageHash `json:"hash,omitempty"`
+
+ // Values of the RPM headers
+ // Read Only: true
+ Headers map[string]string `json:"headers,omitempty"`
+}
+
+// Validate validates this rpm v001 schema package
+func (m *RpmV001SchemaPackage) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RpmV001SchemaPackage) validateHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if m.Hash != nil {
+ if err := m.Hash.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("package" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("package" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rpm v001 schema package based on the context it is used in
+func (m *RpmV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateHeaders(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RpmV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Hash != nil {
+
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if err := m.Hash.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("package" + "." + "hash")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("package" + "." + "hash")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *RpmV001SchemaPackage) contextValidateHeaders(ctx context.Context, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RpmV001SchemaPackage) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RpmV001SchemaPackage) UnmarshalBinary(b []byte) error {
+ var res RpmV001SchemaPackage
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// RpmV001SchemaPackageHash Specifies the hash algorithm and value for the package
+//
+// swagger:model RpmV001SchemaPackageHash
+type RpmV001SchemaPackageHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: ["sha256"]
+ Algorithm *string `json:"algorithm"`
+
+ // The hash value for the package
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this rpm v001 schema package hash
+func (m *RpmV001SchemaPackageHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var rpmV001SchemaPackageHashTypeAlgorithmPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ rpmV001SchemaPackageHashTypeAlgorithmPropEnum = append(rpmV001SchemaPackageHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // RpmV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256"
+ RpmV001SchemaPackageHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *RpmV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, rpmV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *RpmV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *RpmV001SchemaPackageHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rpm v001 schema package hash based on the context it is used in
+func (m *RpmV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RpmV001SchemaPackageHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RpmV001SchemaPackageHash) UnmarshalBinary(b []byte) error {
+ var res RpmV001SchemaPackageHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// RpmV001SchemaPublicKey The PGP public key that can verify the RPM signature
+//
+// swagger:model RpmV001SchemaPublicKey
+type RpmV001SchemaPublicKey struct {
+
+ // Specifies the content of the public key inline within the document
+ // Required: true
+ // Format: byte
+ Content *strfmt.Base64 `json:"content"`
+}
+
+// Validate validates this rpm v001 schema public key
+func (m *RpmV001SchemaPublicKey) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RpmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this rpm v001 schema public key based on the context it is used in
+func (m *RpmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RpmV001SchemaPublicKey) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RpmV001SchemaPublicKey) UnmarshalBinary(b []byte) error {
+ var res RpmV001SchemaPublicKey
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
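
The enum machinery above is worth spelling out: the allowed values are unmarshalled once in `init()` into a package-level slice, and `validate.EnumCase` checks each candidate against it. A hedged sketch of the guard accepting and rejecting algorithms; the digest string is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	hash := models.RpmV001SchemaPackageHash{
		Algorithm: swag.String("sha1"), // not in ["sha256"], so Validate fails
		Value:     swag.String("0000000000000000000000000000000000000000000000000000000000000000"),
	}
	if err := hash.Validate(strfmt.Default); err != nil {
		fmt.Println("rejected:", err) // enum violation for package.hash.algorithm
	}

	hash.Algorithm = swag.String(models.RpmV001SchemaPackageHashAlgorithmSha256)
	fmt.Println("accepted:", hash.Validate(strfmt.Default) == nil)
}
```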
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go
new file mode 100644
index 00000000000..a1b7d08001f
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go
@@ -0,0 +1,350 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// SearchIndex search index
+//
+// swagger:model SearchIndex
+type SearchIndex struct {
+
+ // email
+ // Format: email
+ Email strfmt.Email `json:"email,omitempty"`
+
+ // hash
+ // Pattern: ^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$
+ Hash string `json:"hash,omitempty"`
+
+ // operator
+ // Enum: ["and","or"]
+ Operator string `json:"operator,omitempty"`
+
+ // public key
+ PublicKey *SearchIndexPublicKey `json:"publicKey,omitempty"`
+}
+
+// Validate validates this search index
+func (m *SearchIndex) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEmail(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateOperator(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *SearchIndex) validateEmail(formats strfmt.Registry) error {
+ if swag.IsZero(m.Email) { // not required
+ return nil
+ }
+
+ if err := validate.FormatOf("email", "body", "email", m.Email.String(), formats); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *SearchIndex) validateHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.Hash) { // not required
+ return nil
+ }
+
+ if err := validate.Pattern("hash", "body", m.Hash, `^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+var searchIndexTypeOperatorPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["and","or"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ searchIndexTypeOperatorPropEnum = append(searchIndexTypeOperatorPropEnum, v)
+ }
+}
+
+const (
+
+ // SearchIndexOperatorAnd captures enum value "and"
+ SearchIndexOperatorAnd string = "and"
+
+ // SearchIndexOperatorOr captures enum value "or"
+ SearchIndexOperatorOr string = "or"
+)
+
+// prop value enum
+func (m *SearchIndex) validateOperatorEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, searchIndexTypeOperatorPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *SearchIndex) validateOperator(formats strfmt.Registry) error {
+ if swag.IsZero(m.Operator) { // not required
+ return nil
+ }
+
+ // value enum
+ if err := m.validateOperatorEnum("operator", "body", m.Operator); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *SearchIndex) validatePublicKey(formats strfmt.Registry) error {
+ if swag.IsZero(m.PublicKey) { // not required
+ return nil
+ }
+
+ if m.PublicKey != nil {
+ if err := m.PublicKey.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this search index based on the context it is used in
+func (m *SearchIndex) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *SearchIndex) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PublicKey != nil {
+
+ if swag.IsZero(m.PublicKey) { // not required
+ return nil
+ }
+
+ if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("publicKey")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("publicKey")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *SearchIndex) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *SearchIndex) UnmarshalBinary(b []byte) error {
+ var res SearchIndex
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// SearchIndexPublicKey search index public key
+//
+// swagger:model SearchIndexPublicKey
+type SearchIndexPublicKey struct {
+
+ // content
+ // Format: byte
+ Content strfmt.Base64 `json:"content,omitempty"`
+
+ // format
+ // Required: true
+ // Enum: ["pgp","x509","minisign","ssh","tuf"]
+ Format *string `json:"format"`
+
+ // url
+ // Format: uri
+ URL strfmt.URI `json:"url,omitempty"`
+}
+
+// Validate validates this search index public key
+func (m *SearchIndexPublicKey) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateFormat(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateURL(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var searchIndexPublicKeyTypeFormatPropEnum []any
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["pgp","x509","minisign","ssh","tuf"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ searchIndexPublicKeyTypeFormatPropEnum = append(searchIndexPublicKeyTypeFormatPropEnum, v)
+ }
+}
+
+const (
+
+ // SearchIndexPublicKeyFormatPgp captures enum value "pgp"
+ SearchIndexPublicKeyFormatPgp string = "pgp"
+
+ // SearchIndexPublicKeyFormatX509 captures enum value "x509"
+ SearchIndexPublicKeyFormatX509 string = "x509"
+
+ // SearchIndexPublicKeyFormatMinisign captures enum value "minisign"
+ SearchIndexPublicKeyFormatMinisign string = "minisign"
+
+ // SearchIndexPublicKeyFormatSSH captures enum value "ssh"
+ SearchIndexPublicKeyFormatSSH string = "ssh"
+
+ // SearchIndexPublicKeyFormatTUF captures enum value "tuf"
+ SearchIndexPublicKeyFormatTUF string = "tuf"
+)
+
+// prop value enum
+func (m *SearchIndexPublicKey) validateFormatEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, searchIndexPublicKeyTypeFormatPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *SearchIndexPublicKey) validateFormat(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey"+"."+"format", "body", m.Format); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateFormatEnum("publicKey"+"."+"format", "body", *m.Format); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *SearchIndexPublicKey) validateURL(formats strfmt.Registry) error {
+ if swag.IsZero(m.URL) { // not required
+ return nil
+ }
+
+ if err := validate.FormatOf("publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this search index public key based on the context it is used in
+func (m *SearchIndexPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *SearchIndexPublicKey) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *SearchIndexPublicKey) UnmarshalBinary(b []byte) error {
+ var res SearchIndexPublicKey
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
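
Unlike the entry schemas, `SearchIndex` has no required top-level fields: every criterion is optional and only pattern- or enum-checked when present, and the hash pattern accepts bare or prefixed sha1/sha256/sha512 digests. A sketch of that optional-field behaviour; the digest is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// An empty query is valid: every field is optional.
	fmt.Println(new(models.SearchIndex).Validate(strfmt.Default) == nil)

	// A prefixed sha256 digest matches the hash pattern ...
	q := models.SearchIndex{
		Hash:     "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		Operator: models.SearchIndexOperatorAnd,
	}
	fmt.Println(q.Validate(strfmt.Default) == nil)

	// ... while an operator outside ["and","or"] is rejected.
	q.Operator = "xor"
	fmt.Println(q.Validate(strfmt.Default) != nil)
}
```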
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go
new file mode 100644
index 00000000000..6833c8f6d8f
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go
@@ -0,0 +1,306 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ stderrors "errors"
+ "io"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// SearchLogQuery search log query
+//
+// swagger:model SearchLogQuery
+type SearchLogQuery struct {
+ entriesField []ProposedEntry
+
+ // entry UUIDs
+ // Max Items: 10
+ // Min Items: 1
+ EntryUUIDs []string `json:"entryUUIDs"`
+
+ // log indexes
+ // Max Items: 10
+ // Min Items: 1
+ LogIndexes []*int64 `json:"logIndexes"`
+}
+
+// Entries gets the entries of this base type
+func (m *SearchLogQuery) Entries() []ProposedEntry {
+ return m.entriesField
+}
+
+// SetEntries sets the entries of this base type
+func (m *SearchLogQuery) SetEntries(val []ProposedEntry) {
+ m.entriesField = val
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *SearchLogQuery) UnmarshalJSON(raw []byte) error {
+ var data struct {
+ Entries json.RawMessage `json:"entries"`
+
+ EntryUUIDs []string `json:"entryUUIDs"`
+
+ LogIndexes []*int64 `json:"logIndexes"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var propEntries []ProposedEntry
+ if string(data.Entries) != "null" {
+ entries, err := UnmarshalProposedEntrySlice(bytes.NewBuffer(data.Entries), runtime.JSONConsumer())
+ if err != nil && !stderrors.Is(err, io.EOF) {
+ return err
+ }
+ propEntries = entries
+ }
+
+ var result SearchLogQuery
+
+ // entries
+ result.entriesField = propEntries
+
+ // entryUUIDs
+ result.EntryUUIDs = data.EntryUUIDs
+
+ // logIndexes
+ result.LogIndexes = data.LogIndexes
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m SearchLogQuery) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+ EntryUUIDs []string `json:"entryUUIDs"`
+
+ LogIndexes []*int64 `json:"logIndexes"`
+ }{
+
+ EntryUUIDs: m.EntryUUIDs,
+
+ LogIndexes: m.LogIndexes,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Entries []ProposedEntry `json:"entries"`
+ }{
+
+ Entries: m.entriesField,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this search log query
+func (m *SearchLogQuery) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEntries(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateEntryUUIDs(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateLogIndexes(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *SearchLogQuery) validateEntries(formats strfmt.Registry) error {
+ if swag.IsZero(m.Entries()) { // not required
+ return nil
+ }
+
+ iEntriesSize := int64(len(m.Entries()))
+
+ if err := validate.MinItems("entries", "body", iEntriesSize, 1); err != nil {
+ return err
+ }
+
+ if err := validate.MaxItems("entries", "body", iEntriesSize, 10); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Entries()); i++ {
+
+ if err := m.entriesField[i].Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("entries" + "." + strconv.Itoa(i))
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("entries" + "." + strconv.Itoa(i))
+ }
+
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func (m *SearchLogQuery) validateEntryUUIDs(formats strfmt.Registry) error {
+ if swag.IsZero(m.EntryUUIDs) { // not required
+ return nil
+ }
+
+ iEntryUUIDsSize := int64(len(m.EntryUUIDs))
+
+ if err := validate.MinItems("entryUUIDs", "body", iEntryUUIDsSize, 1); err != nil {
+ return err
+ }
+
+ if err := validate.MaxItems("entryUUIDs", "body", iEntryUUIDsSize, 10); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.EntryUUIDs); i++ {
+
+ if err := validate.Pattern("entryUUIDs"+"."+strconv.Itoa(i), "body", m.EntryUUIDs[i], `^([0-9a-fA-F]{64}|[0-9a-fA-F]{80})$`); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func (m *SearchLogQuery) validateLogIndexes(formats strfmt.Registry) error {
+ if swag.IsZero(m.LogIndexes) { // not required
+ return nil
+ }
+
+ iLogIndexesSize := int64(len(m.LogIndexes))
+
+ if err := validate.MinItems("logIndexes", "body", iLogIndexesSize, 1); err != nil {
+ return err
+ }
+
+ if err := validate.MaxItems("logIndexes", "body", iLogIndexesSize, 10); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.LogIndexes); i++ {
+ if swag.IsZero(m.LogIndexes[i]) { // not required
+ continue
+ }
+
+ if err := validate.MinimumInt("logIndexes"+"."+strconv.Itoa(i), "body", *m.LogIndexes[i], 0, false); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validates this search log query based on the context in which it is used
+func (m *SearchLogQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateEntries(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *SearchLogQuery) contextValidateEntries(ctx context.Context, formats strfmt.Registry) error {
+
+ for i := 0; i < len(m.Entries()); i++ {
+
+ if swag.IsZero(m.entriesField[i]) { // not required
+ continue
+ }
+
+ if err := m.entriesField[i].ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("entries" + "." + strconv.Itoa(i))
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("entries" + "." + strconv.Itoa(i))
+ }
+
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *SearchLogQuery) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *SearchLogQuery) UnmarshalBinary(b []byte) error {
+ var res SearchLogQuery
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
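+
+// Example (an illustrative sketch, not part of the generated API): building a
+// query by entry UUID and round-tripping it through the polymorphic JSON
+// codec above. The UUID is a placeholder 64-hex-character value, not a real
+// log entry.
+//
+//	q := SearchLogQuery{
+//		EntryUUIDs: []string{
+//			"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+//		},
+//	}
+//	if err := q.Validate(strfmt.Default); err != nil {
+//		// handle validation error (min/max items, UUID pattern)
+//	}
+//	body, err := json.Marshal(q) // uses the custom MarshalJSON above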
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go
new file mode 100644
index 00000000000..a5f6eff0f7a
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// TUF TUF metadata
+//
+// swagger:model tuf
+type TUF struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec TUFSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *TUF) Kind() string {
+ return "tuf"
+}
+
+// SetKind sets the kind of this subtype
+func (m *TUF) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *TUF) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec TUFSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result TUF
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m TUF) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec TUFSchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this tuf
+func (m *TUF) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *TUF) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *TUF) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this tuf based on the context in which it is used
+func (m *TUF) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *TUF) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *TUF) UnmarshalBinary(b []byte) error {
+ var res TUF
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
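+
+// Example (an illustrative sketch): the "kind" field is the polymorphic
+// discriminator, so decoding rejects documents whose kind is not "tuf" with a
+// 422-style error instead of silently producing an empty value.
+//
+//	raw := []byte(`{"kind":"tuf","apiVersion":"0.0.1","spec":{}}`)
+//	var entry TUF
+//	if err := entry.UnmarshalJSON(raw); err != nil {
+//		// a mismatched kind (e.g. "rekord") lands here
+//	}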
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go
new file mode 100644
index 00000000000..7c944ef92da
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// TUFSchema TUF Schema
+//
+// # Schema for TUF metadata objects
+//
+// swagger:model tufSchema
+type TUFSchema any
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go
new file mode 100644
index 00000000000..69b5e93d614
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go
@@ -0,0 +1,321 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ stderrors "errors"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// TUFV001Schema TUF v0.0.1 Schema
+//
+// # Schema for TUF metadata entries
+//
+// swagger:model tufV001Schema
+type TUFV001Schema struct {
+
+ // metadata
+ // Required: true
+ Metadata *TUFV001SchemaMetadata `json:"metadata"`
+
+ // root
+ // Required: true
+ Root *TUFV001SchemaRoot `json:"root"`
+
+ // TUF specification version
+ // Read Only: true
+ SpecVersion string `json:"spec_version,omitempty"`
+}
+
+// Validate validates this tuf v001 schema
+func (m *TUFV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateMetadata(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateRoot(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *TUFV001Schema) validateMetadata(formats strfmt.Registry) error {
+
+ if err := validate.Required("metadata", "body", m.Metadata); err != nil {
+ return err
+ }
+
+ if m.Metadata != nil {
+ if err := m.Metadata.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("metadata")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("metadata")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *TUFV001Schema) validateRoot(formats strfmt.Registry) error {
+
+ if err := validate.Required("root", "body", m.Root); err != nil {
+ return err
+ }
+
+ if m.Root != nil {
+ if err := m.Root.Validate(formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("root")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("root")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validates this tuf v001 schema based on the context in which it is used
+func (m *TUFV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateMetadata(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateRoot(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSpecVersion(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Metadata != nil {
+
+ if err := m.Metadata.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("metadata")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("metadata")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *TUFV001Schema) contextValidateRoot(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Root != nil {
+
+ if err := m.Root.ContextValidate(ctx, formats); err != nil {
+ ve := new(errors.Validation)
+ if stderrors.As(err, &ve) {
+ return ve.ValidateName("root")
+ }
+ ce := new(errors.CompositeError)
+ if stderrors.As(err, &ce) {
+ return ce.ValidateName("root")
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *TUFV001Schema) contextValidateSpecVersion(ctx context.Context, formats strfmt.Registry) error {
+
+ if err := validate.ReadOnly(ctx, "spec_version", "body", m.SpecVersion); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *TUFV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *TUFV001Schema) UnmarshalBinary(b []byte) error {
+ var res TUFV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// TUFV001SchemaMetadata TUF metadata
+//
+// swagger:model TUFV001SchemaMetadata
+type TUFV001SchemaMetadata struct {
+
+ // Specifies the metadata inline within the document
+ // Required: true
+ Content any `json:"content"`
+}
+
+// Validate validates this TUF v001 schema metadata
+func (m *TUFV001SchemaMetadata) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *TUFV001SchemaMetadata) validateContent(formats strfmt.Registry) error {
+
+ if m.Content == nil {
+ return errors.Required("metadata"+"."+"content", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this TUF v001 schema metadata based on the context in which it is used
+func (m *TUFV001SchemaMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *TUFV001SchemaMetadata) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *TUFV001SchemaMetadata) UnmarshalBinary(b []byte) error {
+ var res TUFV001SchemaMetadata
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// TUFV001SchemaRoot root metadata containing information about the public keys used to sign the manifest
+//
+// swagger:model TUFV001SchemaRoot
+type TUFV001SchemaRoot struct {
+
+ // Specifies the metadata inline within the document
+ // Required: true
+ Content any `json:"content"`
+}
+
+// Validate validates this TUF v001 schema root
+func (m *TUFV001SchemaRoot) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *TUFV001SchemaRoot) validateContent(formats strfmt.Registry) error {
+
+ if m.Content == nil {
+ return errors.Required("root"+"."+"content", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validates this TUF v001 schema root based on the context in which it is used
+func (m *TUFV001SchemaRoot) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *TUFV001SchemaRoot) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *TUFV001SchemaRoot) UnmarshalBinary(b []byte) error {
+ var res TUFV001SchemaRoot
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
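+
+// Example (an illustrative sketch): both metadata.content and root.content
+// are required, so a minimal valid entry must populate both; omitting either
+// makes Validate return an errors.Required failure.
+//
+//	schema := TUFV001Schema{
+//		Metadata: &TUFV001SchemaMetadata{Content: map[string]any{"signed": "..."}},
+//		Root:     &TUFV001SchemaRoot{Content: map[string]any{"signed": "..."}},
+//	}
+//	err := schema.Validate(strfmt.Default) // nil for the value above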
diff --git a/vendor/github.com/sigstore/rekor/pkg/internal/log/logger.go b/vendor/github.com/sigstore/rekor/pkg/internal/log/logger.go
new file mode 100644
index 00000000000..3d2a2ed0b95
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/internal/log/logger.go
@@ -0,0 +1,36 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+type LoggerImpl interface {
+ Error(...any)
+ Errorf(string, ...any)
+ Panic(...any)
+ Info(...any)
+ Infof(string, ...any)
+}
+
+var Logger LoggerImpl = &discardLogger{}
+
+type discardLogger struct{}
+
+func (l *discardLogger) Panic(err ...any) {
+ panic(err)
+}
+func (l *discardLogger) Error(...any) {}
+func (l *discardLogger) Errorf(string, ...any) {}
+func (l *discardLogger) Infof(string, ...any) {}
+func (l *discardLogger) Info(...any) {}
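+
+// Example (an illustrative sketch): Logger discards output by default; a
+// consuming package can install any LoggerImpl, e.g. a thin adapter over the
+// standard library (stdLogger is a hypothetical name, stdlog is the stdlib
+// "log" package imported under an alias to avoid clashing with this package):
+//
+//	type stdLogger struct{ l *stdlog.Logger }
+//
+//	func (s stdLogger) Error(v ...any)            { s.l.Print(v...) }
+//	func (s stdLogger) Errorf(f string, v ...any) { s.l.Printf(f, v...) }
+//	func (s stdLogger) Panic(v ...any)            { s.l.Panic(v...) }
+//	func (s stdLogger) Info(v ...any)             { s.l.Print(v...) }
+//	func (s stdLogger) Infof(f string, v ...any)  { s.l.Printf(f, v...) }
+//
+//	log.Logger = stdLogger{l: stdlog.Default()}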
diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/identity/identity.go b/vendor/github.com/sigstore/rekor/pkg/pki/identity/identity.go
new file mode 100644
index 00000000000..4566e1ddfe1
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/pki/identity/identity.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package identity
+
+type Identity struct {
+ // Types include:
+ // - *rsa.PublicKey
+ // - *ecdsa.PublicKey
+ // - ed25519.PublicKey
+ // - *x509.Certificate
+ // - openpgp.EntityList (golang.org/x/crypto/openpgp)
+ // - *minisign.PublicKey (github.com/jedisct1/go-minisign)
+ // - ssh.PublicKey (golang.org/x/crypto/ssh)
+ Crypto any
+ // Raw key or certificate extracted from Crypto. Values include:
+ // - PKIX ASN.1 DER-encoded public key
+ // - ASN.1 DER-encoded certificate
+ Raw []byte
+ // For keys, certificates, and minisign, hex-encoded SHA-256 digest
+ // of the public key or certificate
+ // For SSH and PGP, Fingerprint is the standard for each ecosystem
+ // For SSH, unpadded base-64 encoded SHA-256 digest of the key
+ // For PGP, hex-encoded SHA-1 digest of a key, which can be either
+ // a primary key or subkey
+ Fingerprint string
+}
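+
+// Example (an illustrative sketch): populating an Identity for a PKIX public
+// key, following the fingerprint convention documented above (hex-encoded
+// SHA-256 digest of the DER encoding); pub is assumed to be an
+// *ecdsa.PublicKey or similar crypto public key.
+//
+//	der, err := x509.MarshalPKIXPublicKey(pub)
+//	if err != nil {
+//		// handle error
+//	}
+//	sum := sha256.Sum256(der)
+//	id := Identity{
+//		Crypto:      pub,
+//		Raw:         der,
+//		Fingerprint: hex.EncodeToString(sum[:]),
+//	}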
diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/pkitypes/types.go b/vendor/github.com/sigstore/rekor/pkg/pki/pkitypes/types.go
new file mode 100644
index 00000000000..19688bd1bdb
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/pki/pkitypes/types.go
@@ -0,0 +1,40 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pki
+
+import (
+ "io"
+
+ "github.com/sigstore/rekor/pkg/pki/identity"
+ sigsig "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// PublicKey Generic object representing a public key (regardless of format & algorithm)
+type PublicKey interface {
+ CanonicalValue() ([]byte, error)
+ // Deprecated: EmailAddresses() will be deprecated in favor of Subjects() which will
+ // also return Subject URIs present in public keys.
+ EmailAddresses() []string
+ Subjects() []string
+ // Identities returns a list of typed keys and certificates.
+ Identities() ([]identity.Identity, error)
+}
+
+// Signature Generic object representing a signature (regardless of format & algorithm)
+type Signature interface {
+ CanonicalValue() ([]byte, error)
+ Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOption) error
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go b/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go
new file mode 100644
index 00000000000..2cfaf816009
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go
@@ -0,0 +1,276 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package x509
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/asaskevich/govalidator"
+ "github.com/sigstore/rekor/pkg/pki/identity"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+ sigsig "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// EmailAddressOID is defined by https://oidref.com/1.2.840.113549.1.9.1
+var EmailAddressOID asn1.ObjectIdentifier = []int{1, 2, 840, 113549, 1, 9, 1}
+
+type Signature struct {
+ signature []byte
+ verifierLoadOpts []sigsig.LoadOption
+}
+
+// NewSignature creates and validates an x509 signature object
+func NewSignature(r io.Reader) (*Signature, error) {
+ return NewSignatureWithOpts(r)
+}
+
+func NewSignatureWithOpts(r io.Reader, opts ...sigsig.LoadOption) (*Signature, error) {
+ b, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ return &Signature{
+ signature: b,
+ verifierLoadOpts: opts,
+ }, nil
+}
+
+// CanonicalValue implements the pki.Signature interface
+func (s Signature) CanonicalValue() ([]byte, error) {
+ return s.signature, nil
+}
+
+// Verify implements the pki.Signature interface
+func (s Signature) Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOption) error {
+ if len(s.signature) == 0 {
+ //lint:ignore ST1005 X509 is proper use of term
+ return errors.New("X509 signature has not been initialized")
+ }
+
+ key, ok := k.(*PublicKey)
+ if !ok {
+ return fmt.Errorf("invalid public key type for: %v", k)
+ }
+
+ p := key.key
+ if p == nil {
+ switch {
+ case key.cert != nil:
+ p = key.cert.c.PublicKey
+ case len(key.certs) > 0:
+ if err := verifyCertChain(key.certs); err != nil {
+ return err
+ }
+ p = key.certs[0].PublicKey
+ default:
+ return errors.New("no public key found")
+ }
+ }
+
+ verifier, err := sigsig.LoadVerifierWithOpts(p, s.verifierLoadOpts...)
+ if err != nil {
+ return err
+ }
+ return verifier.VerifySignature(bytes.NewReader(s.signature), r, opts...)
+}
+
+// PublicKey Public Key that follows the x509 standard
+type PublicKey struct {
+ key interface{}
+ cert *cert
+ certs []*x509.Certificate
+}
+
+type cert struct {
+ c *x509.Certificate
+ b []byte
+}
+
+// NewPublicKey implements the pki.PublicKey interface
+func NewPublicKey(r io.Reader) (*PublicKey, error) {
+ rawPub, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ trimmedRawPub := bytes.TrimSpace(rawPub)
+
+ block, rest := pem.Decode(trimmedRawPub)
+ if block == nil {
+ return nil, errors.New("invalid public key: failure decoding PEM")
+ }
+
+ // Handle certificate chain, concatenated PEM-encoded certificates
+ if len(rest) > 0 {
+ // Support up to 10 certificates in a chain, to avoid parsing extremely long chains
+ certs, err := cryptoutils.UnmarshalCertificatesFromPEMLimited(trimmedRawPub, 10)
+ if err != nil {
+ return nil, err
+ }
+ return &PublicKey{certs: certs}, nil
+ }
+
+ switch block.Type {
+ case string(cryptoutils.PublicKeyPEMType):
+ key, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ return &PublicKey{key: key}, nil
+ case string(cryptoutils.CertificatePEMType):
+ c, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ return &PublicKey{
+ cert: &cert{
+ c: c,
+ b: block.Bytes,
+ }}, nil
+ }
+ return nil, fmt.Errorf("invalid public key: cannot handle type %v", block.Type)
+}
+
+// CanonicalValue implements the pki.PublicKey interface
+func (k PublicKey) CanonicalValue() (encoded []byte, err error) {
+
+ switch {
+ case k.key != nil:
+ encoded, err = cryptoutils.MarshalPublicKeyToPEM(k.key)
+ case k.cert != nil:
+ encoded, err = cryptoutils.MarshalCertificateToPEM(k.cert.c)
+ case k.certs != nil:
+ encoded, err = cryptoutils.MarshalCertificatesToPEM(k.certs)
+ default:
+ err = errors.New("x509 public key has not been initialized")
+ }
+
+ return
+}
+
+func (k PublicKey) CryptoPubKey() crypto.PublicKey {
+ if k.cert != nil {
+ return k.cert.c.PublicKey
+ }
+ if len(k.certs) > 0 {
+ return k.certs[0].PublicKey
+ }
+ return k.key
+}
+
+// EmailAddresses implements the pki.PublicKey interface
+func (k PublicKey) EmailAddresses() []string {
+ var names []string
+ var cert *x509.Certificate
+ if k.cert != nil {
+ cert = k.cert.c
+ } else if len(k.certs) > 0 {
+ cert = k.certs[0]
+ }
+ if cert != nil {
+ for _, name := range cert.EmailAddresses {
+ if govalidator.IsEmail(name) {
+ names = append(names, strings.ToLower(name))
+ }
+ }
+ }
+ return names
+}
+
+// Subjects implements the pki.PublicKey interface
+func (k PublicKey) Subjects() []string {
+ var subjects []string
+ var cert *x509.Certificate
+ if k.cert != nil {
+ cert = k.cert.c
+ } else if len(k.certs) > 0 {
+ cert = k.certs[0]
+ }
+ if cert != nil {
+ subjects = cryptoutils.GetSubjectAlternateNames(cert)
+ }
+ return subjects
+}
+
+// Identities implements the pki.PublicKey interface
+func (k PublicKey) Identities() ([]identity.Identity, error) {
+ // k contains either a key, a cert, or a list of certs
+ if k.key != nil {
+ pkixKey, err := cryptoutils.MarshalPublicKeyToDER(k.key)
+ if err != nil {
+ return nil, err
+ }
+ digest := sha256.Sum256(pkixKey)
+ return []identity.Identity{{
+ Crypto: k.key,
+ Raw: pkixKey,
+ Fingerprint: hex.EncodeToString(digest[:]),
+ }}, nil
+ }
+
+ var cert *x509.Certificate
+ switch {
+ case k.cert != nil:
+ cert = k.cert.c
+ case len(k.certs) > 0:
+ cert = k.certs[0]
+ default:
+ return nil, errors.New("no key, certificate or certificate chain provided")
+ }
+
+ digest := sha256.Sum256(cert.Raw)
+ return []identity.Identity{{
+ Crypto: cert,
+ Raw: cert.Raw,
+ Fingerprint: hex.EncodeToString(digest[:]),
+ }}, nil
+}
+
+func verifyCertChain(certChain []*x509.Certificate) error {
+ if len(certChain) == 0 {
+ return errors.New("no certificate chain provided")
+ }
+ // No certificate chain to verify
+ if len(certChain) == 1 {
+ return nil
+ }
+ rootPool := x509.NewCertPool()
+ rootPool.AddCert(certChain[len(certChain)-1])
+ subPool := x509.NewCertPool()
+ for _, c := range certChain[1 : len(certChain)-1] {
+ subPool.AddCert(c)
+ }
+ if _, err := certChain[0].Verify(x509.VerifyOptions{
+ Roots: rootPool,
+ Intermediates: subPool,
+ // Allow any key usage
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+ // Expired certificates can be uploaded and should be verifiable
+ CurrentTime: certChain[0].NotBefore,
+ }); err != nil {
+ return err
+ }
+ return nil
+}
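+
+// Example (an illustrative sketch): parsing PEM bytes into a PublicKey and
+// deriving its identity fingerprint; pemBytes is assumed to hold a single
+// PUBLIC KEY or CERTIFICATE block.
+//
+//	pub, err := NewPublicKey(bytes.NewReader(pemBytes))
+//	if err != nil {
+//		// handle parse error
+//	}
+//	ids, err := pub.Identities()
+//	if err != nil {
+//		// handle error
+//	}
+//	fingerprint := ids[0].Fingerprint // hex-encoded SHA-256 digest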
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/README.md b/vendor/github.com/sigstore/rekor/pkg/types/README.md
new file mode 100644
index 00000000000..76cb36e61ad
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/README.md
@@ -0,0 +1,32 @@
+# Pluggable Types
+
+## Description
+
+Rekor supports pluggable types (aka different schemas) for entries stored in the transparency log.
+
+### Currently supported types
+
+- Alpine Packages [schema](alpine/alpine_schema.json)
+ - Versions: 0.0.1
+- COSE Envelopes [schema](cose/cose_schema.json)
+ - Versions: 0.0.1
+- DSSE Envelopes [schema](dsse/dsse_schema.json)
+ - Versions: 0.0.1
+- HashedRekord [schema](hashedrekord/hashedrekord_schema.json)
+ - Versions: 0.0.1
+- Helm Provenance Files [schema](helm/helm_schema.json)
+ - Versions: 0.0.1
+- In-Toto Attestations [schema](intoto/intoto_schema.json)
+ - Versions: 0.0.1, 0.0.2
+- Java Archives (JAR Files) [schema](jar/jar_schema.json)
+ - Versions: 0.0.1
+- Rekord *(default type)* [schema](rekord/rekord_schema.json)
+ - Versions: 0.0.1
+- RFC3161 Timestamps [schema](rfc3161/rfc3161_schema.json)
+ - Versions: 0.0.1
+- RPM Packages [schema](rpm/rpm_schema.json)
+ - Versions: 0.0.1
+- TUF Metadata [schema](tuf/tuf_schema.json)
+ - Versions: 0.0.1
+
+Refer to [Rekor docs](https://docs.sigstore.dev/rekor/pluggable-types) for adding support for new types.
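+
+As a rough sketch (mirroring the existing `dsse` type implementation vendored
+alongside; the `mytype` identifiers below are illustrative), each pluggable
+type registers a factory with the global type map at init time:
+
+```go
+package mytype
+
+import "github.com/sigstore/rekor/pkg/types"
+
+const KIND = "mytype" // illustrative kind name
+
+// VersionMap holds an entry factory per supported semver version.
+var VersionMap = types.NewSemVerEntryFactoryMap()
+
+type baseType struct {
+	types.RekorType
+}
+
+func init() {
+	// Make the kind discoverable by NewProposedEntry / UnmarshalEntry.
+	types.TypeMap.Store(KIND, New)
+}
+
+func New() types.TypeImpl {
+	t := baseType{}
+	t.Kind = KIND
+	t.VersionMap = VersionMap
+	return &t
+}
+
+// UnmarshalEntry, CreateProposedEntry, and DefaultVersion (omitted here)
+// complete the types.TypeImpl contract; see the dsse package for a model.
+```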
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/README.md b/vendor/github.com/sigstore/rekor/pkg/types/dsse/README.md
new file mode 100644
index 00000000000..3244c98ec9a
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/README.md
@@ -0,0 +1,25 @@
+**DSSE Type Data Documentation**
+
+This document provides a definition for each field that is not otherwise described in the [dsse
+schema](https://github.com/sigstore/rekor/blob/main/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json). This
+document also notes any additional information about the values
+associated with each field such as the format in which the data is
+stored and any necessary transformations.
+
+**How do you identify an object as a DSSE object?**
+
+The "Body" field will include an "dsseObj" field.
+
+**Recognized content types**
+
+- [in-toto
+ statements](https://github.com/in-toto/attestation/tree/main/spec#statement)
+ are recognized and parsed. The subject hashes found in them are indexed so
+ they can be searched for.
+
+**What data about the envelope is stored in Rekor**
+
+Only the hash of the payload (the content covered by the digital signature inside the envelope), the hash of the entire DSSE envelope (including signatures),
+the signature(s) and their corresponding verifying materials (e.g. public key(s) or certificates) are stored.
+
+Even if Rekor is configured to use attestation storage, the entire DSSE envelope will not be stored.
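+
+As a rough sketch (variable names are illustrative), a monitor can recompute
+the payload-hash index key that Rekor derives for an envelope:
+
+```go
+// decodedPayload holds the base64-decoded payload of the DSSE envelope.
+digest := sha256.Sum256(decodedPayload)
+indexKey := "sha256:" + hex.EncodeToString(digest[:])
+// indexKey matches the entry's payloadHash and can be searched for.
+```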
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse.go b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse.go
new file mode 100644
index 00000000000..9036fe5625c
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse.go
@@ -0,0 +1,74 @@
+//
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dsse
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sigstore/rekor/pkg/types"
+)
+
+const (
+ KIND = "dsse"
+)
+
+type BaseDSSEType struct {
+ types.RekorType
+}
+
+func init() {
+ types.TypeMap.Store(KIND, New)
+}
+
+func New() types.TypeImpl {
+ bit := BaseDSSEType{}
+ bit.Kind = KIND
+ bit.VersionMap = VersionMap
+ return &bit
+}
+
+var VersionMap = types.NewSemVerEntryFactoryMap()
+
+func (it BaseDSSEType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) {
+ if pe == nil {
+ return nil, errors.New("proposed entry cannot be nil")
+ }
+
+ in, ok := pe.(*models.DSSE)
+ if !ok {
+ return nil, errors.New("cannot unmarshal non-DSSE types")
+ }
+
+ return it.VersionedUnmarshal(in, *in.APIVersion)
+}
+
+func (it *BaseDSSEType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) {
+ if version == "" {
+ version = it.DefaultVersion()
+ }
+ ei, err := it.VersionedUnmarshal(nil, version)
+ if err != nil {
+ return nil, fmt.Errorf("fetching DSSE version implementation: %w", err)
+ }
+ return ei.CreateFromArtifactProperties(ctx, props)
+}
+
+func (it BaseDSSEType) DefaultVersion() string {
+ return "0.0.1"
+}
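+
+// Example (an illustrative sketch): resolving a client-supplied entry through
+// this type; pe is assumed to be a *models.DSSE proposed entry.
+//
+//	impl := New()
+//	entry, err := impl.UnmarshalEntry(pe)
+//	if err != nil {
+//		// unsupported version or malformed entry
+//	}
+//	_ = entry // a types.EntryImpl for pe's apiVersion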
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse_schema.json
new file mode 100644
index 00000000000..7dc710fe215
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/dsse_schema.json
@@ -0,0 +1,12 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://rekor.sigstore.dev/types/dsse/dsse_schema.json",
+ "title": "DSSE Schema",
+ "description": "log entry schema for dsse envelopes",
+ "type": "object",
+ "oneOf": [
+ {
+ "$ref": "v0.0.1/dsse_v0_0_1_schema.json"
+ }
+ ]
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json
new file mode 100644
index 00000000000..51ebe3990af
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/dsse_v0_0_1_schema.json
@@ -0,0 +1,96 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://rekor.sigstore.dev/types/dsse/dsse_v0_0_1_schema.json",
+ "title": "DSSE v0.0.1 Schema",
+ "description": "Schema for DSSE envelopes",
+ "type": "object",
+ "properties": {
+ "proposedContent": {
+ "type": "object",
+ "properties": {
+ "envelope": {
+ "description": "DSSE envelope specified as a stringified JSON object",
+ "type": "string",
+ "writeOnly": true
+ },
+ "verifiers": {
+ "description": "collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings",
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "type": "string",
+ "format": "byte"
+ },
+ "writeOnly": true
+ }
+ },
+ "writeOnly": true,
+ "required": [ "envelope", "verifiers" ]
+ },
+ "signatures": {
+ "description": "extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings",
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "description": "a signature of the envelope's payload along with the verification material for the signature",
+ "type": "object",
+ "properties": {
+ "signature": {
+ "description": "base64 encoded signature of the payload",
+ "type": "string",
+ "pattern": "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
+ },
+ "verifier": {
+ "description": "verification material that was used to verify the corresponding signature, specified as a base64 encoded string",
+ "type": "string",
+ "format": "byte"
+ }
+ },
+ "required": [ "signature", "verifier" ]
+ },
+ "readOnly": true
+ },
+ "envelopeHash": {
+ "description": "Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor",
+ "type": "object",
+ "properties": {
+ "algorithm": {
+ "description": "The hashing function used to compute the hash value",
+ "type": "string",
+ "enum": [ "sha256" ]
+ },
+ "value": {
+ "description": "The value of the computed digest over the entire envelope",
+ "type": "string"
+ }
+ },
+ "required": [ "algorithm", "value" ],
+ "readOnly": true
+ },
+ "payloadHash": {
+ "description": "Specifies the hash algorithm and value covering the payload within the DSSE envelope",
+ "type": "object",
+ "properties": {
+ "algorithm": {
+ "description": "The hashing function used to compute the hash value",
+ "type": "string",
+ "enum": [ "sha256" ]
+ },
+ "value": {
+ "description": "The value of the computed digest over the payload within the envelope",
+ "type": "string"
+ }
+ },
+ "required": [ "algorithm", "value" ],
+ "readOnly": true
+ }
+ },
+ "oneOf": [
+ {
+ "required": [ "proposedContent" ]
+ },
+ {
+ "required": [ "signatures", "envelopeHash", "payloadHash" ]
+ }
+ ]
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go
new file mode 100644
index 00000000000..75fac99bdf3
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/dsse/v0.0.1/entry.go
@@ -0,0 +1,544 @@
+//
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dsse
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag/conv"
+ "github.com/in-toto/in-toto-golang/in_toto"
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sigstore/rekor/pkg/internal/log"
+ pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes"
+ "github.com/sigstore/rekor/pkg/pki/x509"
+ "github.com/sigstore/rekor/pkg/types"
+ dsseType "github.com/sigstore/rekor/pkg/types/dsse"
+ "github.com/sigstore/sigstore/pkg/signature"
+ sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse"
+)
+
+const (
+ APIVERSION = "0.0.1"
+)
+
+func init() {
+ if err := dsseType.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil {
+ log.Logger.Panic(err)
+ }
+}
+
+type V001Entry struct {
+ DSSEObj models.DSSEV001Schema
+ env *dsse.Envelope
+}
+
+func (v V001Entry) APIVersion() string {
+ return APIVERSION
+}
+
+func NewEntry() types.EntryImpl {
+ return &V001Entry{}
+}
+
+// IndexKeys computes the list of keys that should map back to this entry.
+// It should *never* reference v.DSSEObj.ProposedContent as those values would only
+// be present at the time of insertion
+func (v V001Entry) IndexKeys() ([]string, error) {
+ var result []string
+
+ for _, sig := range v.DSSEObj.Signatures {
+ if sig == nil || sig.Verifier == nil {
+ return result, errors.New("missing or malformed public key")
+ }
+ keyObj, err := x509.NewPublicKey(bytes.NewReader(*sig.Verifier))
+ if err != nil {
+ return result, err
+ }
+
+ canonKey, err := keyObj.CanonicalValue()
+ if err != nil {
+ return result, fmt.Errorf("could not canonicalize key: %w", err)
+ }
+
+ keyHash := sha256.Sum256(canonKey)
+ result = append(result, "sha256:"+hex.EncodeToString(keyHash[:]))
+
+ result = append(result, keyObj.Subjects()...)
+ }
+
+ if v.DSSEObj.PayloadHash != nil {
+ payloadHashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.PayloadHash.Algorithm, *v.DSSEObj.PayloadHash.Value))
+ result = append(result, payloadHashKey)
+ }
+
+ if v.DSSEObj.EnvelopeHash != nil {
+ envelopeHashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.EnvelopeHash.Algorithm, *v.DSSEObj.EnvelopeHash.Value))
+ result = append(result, envelopeHashKey)
+ }
+
+ if v.env == nil {
+ log.Logger.Info("DSSEObj content or DSSE envelope is nil, returning partial set of keys")
+ return result, nil
+ }
+
+ switch v.env.PayloadType {
+ case in_toto.PayloadType:
+
+ if v.env.Payload == "" {
+ log.Logger.Info("DSSEObj DSSE payload is empty")
+ return result, nil
+ }
+ decodedPayload, err := v.env.DecodeB64Payload()
+ if err != nil {
+ return result, fmt.Errorf("could not decode envelope payload: %w", err)
+ }
+ statement, err := parseStatement(decodedPayload)
+ if err != nil {
+ return result, err
+ }
+ for _, s := range statement.Subject {
+ for alg, ds := range s.Digest {
+ result = append(result, alg+":"+ds)
+ }
+ }
+ // Not all in-toto statements will contain a SLSA provenance predicate.
+ // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate
+ // for other predicates.
+ if predicate, err := parseSlsaPredicate(decodedPayload); err == nil {
+ if predicate.Predicate.Materials != nil {
+ for _, s := range predicate.Predicate.Materials {
+ for alg, ds := range s.Digest {
+ result = append(result, alg+":"+ds)
+ }
+ }
+ }
+ }
+ default:
+ log.Logger.Infof("Unknown DSSE envelope payloadType: %s", v.env.PayloadType)
+ }
+ return result, nil
+}
+
+func parseStatement(p []byte) (*in_toto.Statement, error) {
+ ps := in_toto.Statement{}
+ if err := json.Unmarshal(p, &ps); err != nil {
+ return nil, err
+ }
+ return &ps, nil
+}
+
+func parseSlsaPredicate(p []byte) (*in_toto.ProvenanceStatement, error) {
+ predicate := in_toto.ProvenanceStatement{}
+ if err := json.Unmarshal(p, &predicate); err != nil {
+ return nil, err
+ }
+ return &predicate, nil
+}
+
+// DecodeEntry decodes the input directly into the provided output pointer,
+// leaving the output untouched if an error occurs.
+func DecodeEntry(input any, output *models.DSSEV001Schema) error {
+ if output == nil {
+ return fmt.Errorf("nil output *models.DSSEV001Schema")
+ }
+ var m models.DSSEV001Schema
+ // Single type switch: handle the common map[string]any case inline, with typed fallbacks below
+ switch data := input.(type) {
+ case map[string]any:
+ mm := data
+ if pcRaw, ok := mm["proposedContent"].(map[string]any); ok {
+ m.ProposedContent = &models.DSSEV001SchemaProposedContent{}
+ if env, ok := pcRaw["envelope"].(string); ok {
+ m.ProposedContent.Envelope = &env
+ }
+ if vsIF, ok := pcRaw["verifiers"].([]any); ok {
+ m.ProposedContent.Verifiers = make([]strfmt.Base64, 0, len(vsIF))
+ for _, it := range vsIF {
+ if s, ok := it.(string); ok && s != "" {
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(s))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for verifier: %w", err)
+ }
+ m.ProposedContent.Verifiers = append(m.ProposedContent.Verifiers, strfmt.Base64(outb[:n]))
+ }
+ }
+ } else if vsStr, ok := pcRaw["verifiers"].([]string); ok {
+ m.ProposedContent.Verifiers = make([]strfmt.Base64, 0, len(vsStr))
+ for _, s := range vsStr {
+ if s == "" {
+ continue
+ }
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(s))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for verifier: %w", err)
+ }
+ m.ProposedContent.Verifiers = append(m.ProposedContent.Verifiers, strfmt.Base64(outb[:n]))
+ }
+ }
+ }
+ if sigs, ok := mm["signatures"].([]any); ok {
+ m.Signatures = make([]*models.DSSEV001SchemaSignaturesItems0, 0, len(sigs))
+ for _, s := range sigs {
+ if sm, ok := s.(map[string]any); ok {
+ item := &models.DSSEV001SchemaSignaturesItems0{}
+ if sig, ok := sm["signature"].(string); ok {
+ item.Signature = &sig
+ }
+ if vr, ok := sm["verifier"].(string); ok && vr != "" {
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(vr)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(vr))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for signature verifier: %w", err)
+ }
+ b := strfmt.Base64(outb[:n])
+ item.Verifier = &b
+ }
+ m.Signatures = append(m.Signatures, item)
+ }
+ }
+ }
+ if eh, ok := mm["envelopeHash"].(map[string]any); ok {
+ m.EnvelopeHash = &models.DSSEV001SchemaEnvelopeHash{}
+ if alg, ok := eh["algorithm"].(string); ok {
+ m.EnvelopeHash.Algorithm = &alg
+ }
+ if val, ok := eh["value"].(string); ok {
+ m.EnvelopeHash.Value = &val
+ }
+ }
+ if ph, ok := mm["payloadHash"].(map[string]any); ok {
+ m.PayloadHash = &models.DSSEV001SchemaPayloadHash{}
+ if alg, ok := ph["algorithm"].(string); ok {
+ m.PayloadHash.Algorithm = &alg
+ }
+ if val, ok := ph["value"].(string); ok {
+ m.PayloadHash.Value = &val
+ }
+ }
+ *output = m
+ return nil
+ case *models.DSSEV001Schema:
+ if data == nil {
+ return fmt.Errorf("nil *models.DSSEV001Schema")
+ }
+ *output = *data
+ return nil
+ case models.DSSEV001Schema:
+ *output = data
+ return nil
+ default:
+ return fmt.Errorf("unsupported input type %T for DecodeEntry", input)
+ }
+}
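+
+// Example (an illustrative sketch): DecodeEntry accepts either an untyped map
+// (as produced by a generic JSON decode of the "spec" field) or the concrete
+// schema type; the hash value below is a placeholder.
+//
+//	var out models.DSSEV001Schema
+//	spec := map[string]any{
+//		"payloadHash": map[string]any{"algorithm": "sha256", "value": "..."},
+//	}
+//	if err := DecodeEntry(spec, &out); err != nil {
+//		// handle decode error
+//	}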
+
+func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error {
+ it, ok := pe.(*models.DSSE)
+ if !ok {
+ return errors.New("cannot unmarshal non DSSE v0.0.1 type")
+ }
+
+ dsseObj := &models.DSSEV001Schema{}
+
+ if err := DecodeEntry(it.Spec, dsseObj); err != nil {
+ return err
+ }
+
+ // field validation
+ if err := dsseObj.Validate(strfmt.Default); err != nil {
+ return err
+ }
+
+ // either we have just proposed content or the canonicalized fields
+ if dsseObj.ProposedContent == nil {
+ // then we need canonicalized fields, and all must be present (if present, they would have been validated in the above call to Validate())
+ if dsseObj.EnvelopeHash == nil || dsseObj.PayloadHash == nil || len(dsseObj.Signatures) == 0 {
+ return errors.New("either proposedContent or envelopeHash, payloadHash, and signatures must be present")
+ }
+ v.DSSEObj = *dsseObj
+ return nil
+ }
+ // if we're here, then we're trying to propose a new entry, so we check to ensure clients aren't setting server-side computed fields
+ if dsseObj.EnvelopeHash != nil || dsseObj.PayloadHash != nil || len(dsseObj.Signatures) != 0 {
+ return errors.New("either proposedContent or envelopeHash, payloadHash, and signatures must be present but not both")
+ }
+
+ env := &dsse.Envelope{}
+ if err := json.Unmarshal([]byte(*dsseObj.ProposedContent.Envelope), env); err != nil {
+ return err
+ }
+
+ if len(env.Signatures) == 0 {
+ return errors.New("DSSE envelope must contain 1 or more signatures")
+ }
+
+ allPubKeyBytes := make([][]byte, 0)
+ for _, publicKey := range dsseObj.ProposedContent.Verifiers {
+ if publicKey == nil {
+ return errors.New("an invalid null verifier was provided in ProposedContent")
+ }
+
+ allPubKeyBytes = append(allPubKeyBytes, publicKey)
+ }
+
+ sigToKeyMap, err := verifyEnvelope(allPubKeyBytes, env)
+ if err != nil {
+ return err
+ }
+
+ // we need to ensure we canonicalize the ordering of signatures
+ sortedSigs := make([]string, 0, len(sigToKeyMap))
+ for sig := range sigToKeyMap {
+ sortedSigs = append(sortedSigs, sig)
+ }
+ sort.Strings(sortedSigs)
+
+ for i, sig := range sortedSigs {
+ key := sigToKeyMap[sig]
+ canonicalizedKey, err := key.CanonicalValue()
+ if err != nil {
+ return err
+ }
+ b64CanonicalizedKey := strfmt.Base64(canonicalizedKey)
+
+ dsseObj.Signatures = append(dsseObj.Signatures, &models.DSSEV001SchemaSignaturesItems0{
+ Signature: &sortedSigs[i],
+ Verifier: &b64CanonicalizedKey,
+ })
+ }
+
+ decodedPayload, err := env.DecodeB64Payload()
+ if err != nil {
+ // this shouldn't happen because failure would have occurred in verifyEnvelope call above
+ return err
+ }
+
+ payloadHash := sha256.Sum256(decodedPayload)
+ dsseObj.PayloadHash = &models.DSSEV001SchemaPayloadHash{
+ Algorithm: conv.Pointer(models.DSSEV001SchemaPayloadHashAlgorithmSha256),
+ Value: conv.Pointer(hex.EncodeToString(payloadHash[:])),
+ }
+
+ envelopeHash := sha256.Sum256([]byte(*dsseObj.ProposedContent.Envelope))
+ dsseObj.EnvelopeHash = &models.DSSEV001SchemaEnvelopeHash{
+ Algorithm: conv.Pointer(models.DSSEV001SchemaEnvelopeHashAlgorithmSha256),
+ Value: conv.Pointer(hex.EncodeToString(envelopeHash[:])),
+ }
+
+ // we've gotten through all processing without error, now update the object we're unmarshalling into
+ v.DSSEObj = *dsseObj
+ v.env = env
+
+ return nil
+}
+
+// Canonicalize returns a JSON representation of the entry to be persisted into the log. This
+// will be further canonicalized by JSON Canonicalization Scheme (JCS) before being written.
+//
+// This function should not use v.DSSEObj.ProposedContent fields as they are client provided and
+// should not be trusted; the other fields at the top level are only set server side.
+func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) {
+ canonicalEntry := models.DSSEV001Schema{
+ Signatures: v.DSSEObj.Signatures,
+ EnvelopeHash: v.DSSEObj.EnvelopeHash,
+ PayloadHash: v.DSSEObj.PayloadHash,
+ ProposedContent: nil, // this is explicitly done as we don't want to canonicalize the envelope
+ }
+
+ for _, s := range canonicalEntry.Signatures {
+ if s.Signature == nil {
+ return nil, errors.New("canonical entry missing required signature")
+ }
+ }
+
+ sort.Slice(canonicalEntry.Signatures, func(i, j int) bool {
+ return *canonicalEntry.Signatures[i].Signature < *canonicalEntry.Signatures[j].Signature
+ })
+
+ itObj := models.DSSE{}
+ itObj.APIVersion = conv.Pointer(APIVERSION)
+ itObj.Spec = &canonicalEntry
+
+ return json.Marshal(&itObj)
+}
+
+// AttestationKey and AttestationKeyValue are not implemented so the envelopes will not be persisted in Rekor
+
+func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) {
+ returnVal := models.DSSE{}
+ re := V001Entry{
+ DSSEObj: models.DSSEV001Schema{
+ ProposedContent: &models.DSSEV001SchemaProposedContent{},
+ },
+ }
+ var err error
+ artifactBytes := props.ArtifactBytes
+ if artifactBytes == nil {
+ if props.ArtifactPath == nil {
+ return nil, errors.New("path to artifact file must be specified")
+ }
+ if props.ArtifactPath.IsAbs() {
+ return nil, errors.New("dsse envelopes cannot be fetched over HTTP(S)")
+ }
+ artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ env := &dsse.Envelope{}
+ if err := json.Unmarshal(artifactBytes, env); err != nil {
+ return nil, fmt.Errorf("payload must be a valid DSSE envelope: %w", err)
+ }
+
+ allPubKeyBytes := make([][]byte, 0)
+ if len(props.PublicKeyBytes) > 0 {
+ allPubKeyBytes = append(allPubKeyBytes, props.PublicKeyBytes...)
+ }
+
+ if len(props.PublicKeyPaths) > 0 {
+ for _, path := range props.PublicKeyPaths {
+ if path.IsAbs() {
+ return nil, errors.New("dsse public keys cannot be fetched over HTTP(S)")
+ }
+
+ publicKeyBytes, err := os.ReadFile(filepath.Clean(path.Path))
+ if err != nil {
+ return nil, fmt.Errorf("error reading public key file: %w", err)
+ }
+
+ allPubKeyBytes = append(allPubKeyBytes, publicKeyBytes)
+ }
+ }
+
+ keysBySig, err := verifyEnvelope(allPubKeyBytes, env)
+ if err != nil {
+ return nil, err
+ }
+ for _, key := range keysBySig {
+ canonicalKey, err := key.CanonicalValue()
+ if err != nil {
+ return nil, err
+ }
+ re.DSSEObj.ProposedContent.Verifiers = append(re.DSSEObj.ProposedContent.Verifiers, strfmt.Base64(canonicalKey))
+ }
+ re.DSSEObj.ProposedContent.Envelope = conv.Pointer(string(artifactBytes))
+
+ returnVal.Spec = re.DSSEObj
+ returnVal.APIVersion = conv.Pointer(re.APIVersion())
+
+ return &returnVal, nil
+}
+
+// verifyEnvelope takes in an array of possible key bytes and attempts to parse them as x509 public keys.
+// it then uses these to verify the envelope and makes sure that every signature on the envelope is verified.
+// it returns a map of verifiers indexed by the signature the verifier corresponds to.
+func verifyEnvelope(allPubKeyBytes [][]byte, env *dsse.Envelope) (map[string]*x509.PublicKey, error) {
+ // map each verified signature back to the key that verified it, so callers can recover the key bytes for a given signature
+ verifierBySig := make(map[string]*x509.PublicKey)
+ allSigs := make(map[string]struct{})
+ for _, sig := range env.Signatures {
+ allSigs[sig.Sig] = struct{}{}
+ }
+
+ for _, pubKeyBytes := range allPubKeyBytes {
+ if len(allSigs) == 0 {
+ break // all signatures have been verified; stop trying additional keys
+ }
+ key, err := x509.NewPublicKey(bytes.NewReader(pubKeyBytes))
+ if err != nil {
+ return nil, fmt.Errorf("could not parse public key as x509: %w", err)
+ }
+
+ vfr, err := signature.LoadVerifier(key.CryptoPubKey(), crypto.SHA256)
+ if err != nil {
+ return nil, fmt.Errorf("could not load verifier: %w", err)
+ }
+
+ dsseVfr, err := dsse.NewEnvelopeVerifier(&sigdsse.VerifierAdapter{SignatureVerifier: vfr})
+ if err != nil {
+ return nil, fmt.Errorf("could not use public key as a dsse verifier: %w", err)
+ }
+
+ accepted, err := dsseVfr.Verify(context.Background(), env)
+ if err != nil {
+ return nil, fmt.Errorf("could not verify envelope: %w", err)
+ }
+
+ for _, accept := range accepted {
+ delete(allSigs, accept.Sig.Sig)
+ verifierBySig[accept.Sig.Sig] = key
+ }
+ }
+
+ if len(allSigs) > 0 {
+ return nil, errors.New("all signatures must have a key that verifies it")
+ }
+
+ return verifierBySig, nil
+}
+
+func (v V001Entry) Verifiers() ([]pkitypes.PublicKey, error) {
+ if len(v.DSSEObj.Signatures) == 0 {
+ return nil, errors.New("dsse v0.0.1 entry not initialized")
+ }
+
+ var keys []pkitypes.PublicKey
+ for _, s := range v.DSSEObj.Signatures {
+ key, err := x509.NewPublicKey(bytes.NewReader(*s.Verifier))
+ if err != nil {
+ return nil, err
+ }
+ keys = append(keys, key)
+ }
+ return keys, nil
+}
+
+func (v V001Entry) ArtifactHash() (string, error) {
+ if v.DSSEObj.PayloadHash == nil || v.DSSEObj.PayloadHash.Algorithm == nil || v.DSSEObj.PayloadHash.Value == nil {
+ return "", errors.New("dsse v0.0.1 entry not initialized")
+ }
+ return strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.PayloadHash.Algorithm, *v.DSSEObj.PayloadHash.Value)), nil
+}
+
+func (v V001Entry) Insertable() (bool, error) {
+ if v.DSSEObj.ProposedContent == nil {
+ return false, errors.New("missing proposed content")
+ }
+ if v.DSSEObj.ProposedContent.Envelope == nil || len(*v.DSSEObj.ProposedContent.Envelope) == 0 {
+ return false, errors.New("missing proposed DSSE envelope")
+ }
+ if len(v.DSSEObj.ProposedContent.Verifiers) == 0 {
+ return false, errors.New("missing proposed verifiers")
+ }
+
+ return true, nil
+}
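+
+// Example (an illustrative sketch): verifying a DSSE envelope against a set
+// of candidate keys; pemKey is assumed to hold a PEM-encoded public key and
+// env a parsed *dsse.Envelope. Every signature on the envelope must verify.
+//
+//	keysBySig, err := verifyEnvelope([][]byte{pemKey}, env)
+//	if err != nil {
+//		// at least one signature had no verifying key
+//	}
+//	for sig, key := range keysBySig {
+//		_ = sig // base64-encoded signature string
+//		_ = key // *x509.PublicKey that verified it
+//	}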
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/entries.go b/vendor/github.com/sigstore/rekor/pkg/types/entries.go
new file mode 100644
index 00000000000..06a8525bd9a
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/entries.go
@@ -0,0 +1,176 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/url"
+ "reflect"
+
+ "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-viper/mapstructure/v2"
+ "github.com/sigstore/rekor/pkg/generated/models"
+ pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes"
+)
+
+// EntryImpl specifies the behavior of a versioned type
+type EntryImpl interface {
+ APIVersion() string // the supported version for this implementation
+ IndexKeys() ([]string, error) // the keys that should be added to the external index for this entry
+ Canonicalize(ctx context.Context) ([]byte, error) // marshal the canonical entry to be put into the tlog
+ Unmarshal(e models.ProposedEntry) error // unmarshal the abstract entry into the specific struct for this versioned type
+ CreateFromArtifactProperties(context.Context, ArtifactProperties) (models.ProposedEntry, error)
+ Verifiers() ([]pkitypes.PublicKey, error) // list of keys or certificates that can verify an entry's signature
+ ArtifactHash() (string, error) // hex-encoded artifact hash prefixed with hash name, e.g. sha256:abcdef
+ Insertable() (bool, error) // denotes whether the entry that was unmarshalled has the writeOnly fields required to validate and insert into the log
+}
+
+// EntryWithAttestationImpl specifies the behavior of a versioned type that also stores attestations
+type EntryWithAttestationImpl interface {
+ EntryImpl
+ AttestationKey() string // returns the key used to look up the attestation from storage (should be sha256:digest)
+ AttestationKeyValue() (string, []byte) // returns the key to be used when storing the attestation as well as the attestation itself
+}
+
+// ProposedEntryIterator is an iterator over a list of proposed entries
+type ProposedEntryIterator interface {
+ models.ProposedEntry
+ HasNext() bool
+ Get() models.ProposedEntry
+ GetNext() models.ProposedEntry
+}
+
+// EntryFactory describes a factory function that can generate structs for a specific versioned type
+type EntryFactory func() EntryImpl
+
+func NewProposedEntry(ctx context.Context, kind, version string, props ArtifactProperties) (models.ProposedEntry, error) {
+ if tf, found := TypeMap.Load(kind); found {
+ t := tf.(func() TypeImpl)()
+ if t == nil {
+ return nil, fmt.Errorf("error generating object for kind '%v'", kind)
+ }
+ return t.CreateProposedEntry(ctx, version, props)
+ }
+ return nil, fmt.Errorf("could not create entry for kind '%v'", kind)
+}
+
+// CreateVersionedEntry returns the specific instance for the type and version specified in the doc
+// This method should be used on the insertion flow, which validates that the specific version proposed
+// is permitted to be entered into the log.
+func CreateVersionedEntry(pe models.ProposedEntry) (EntryImpl, error) {
+ ei, err := UnmarshalEntry(pe)
+ if err != nil {
+ return nil, err
+ }
+ kind := pe.Kind()
+ if tf, found := TypeMap.Load(kind); found {
+ if !tf.(func() TypeImpl)().IsSupportedVersion(ei.APIVersion()) {
+ return nil, fmt.Errorf("entry kind '%v' does not support inserting entries of version '%v'", kind, ei.APIVersion())
+ }
+ } else {
+ return nil, fmt.Errorf("unknown kind '%v' specified", kind)
+ }
+
+ if ok, err := ei.Insertable(); !ok {
+ return nil, fmt.Errorf("entry not insertable into log: %w", err)
+ }
+
+ return ei, nil
+}
+
+// UnmarshalEntry returns the specific instance for the type and version specified in the doc
+// This method does not check for whether the version of the entry could be currently inserted into the log,
+// and is useful when dealing with entries that have been persisted to the log.
+func UnmarshalEntry(pe models.ProposedEntry) (EntryImpl, error) {
+ if pe == nil {
+ return nil, errors.New("proposed entry cannot be nil")
+ }
+
+ kind := pe.Kind()
+ if tf, found := TypeMap.Load(kind); found {
+ t := tf.(func() TypeImpl)()
+ if t == nil {
+ return nil, fmt.Errorf("error generating object for kind '%v'", kind)
+ }
+ return t.UnmarshalEntry(pe)
+ }
+ return nil, fmt.Errorf("could not unmarshal entry for kind '%v'", kind)
+}
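+
+// unmarshalExample is a minimal sketch (not part of the upstream API) of the
+// read path documented above: a proposed entry fetched back from the log is
+// turned into its versioned implementation even when that version can no
+// longer be inserted (contrast CreateVersionedEntry).
+func unmarshalExample(pe models.ProposedEntry) ([]string, error) {
+ ei, err := UnmarshalEntry(pe)
+ if err != nil {
+ return nil, err
+ }
+ return ei.IndexKeys() // e.g. key digests, certificate subjects, payload hash
+}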
+
+// DecodeEntry maps the (abstract) input structure into the specific entry
+// implementation struct for this versioned type; while doing so, it detects
+// fields that need conversion from string to []byte and performs the required
+// base64 decoding. It also detects and converts string values into
+// strfmt.DateTime.
+func DecodeEntry(input, output interface{}) error {
+ cfg := mapstructure.DecoderConfig{
+ DecodeHook: func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String || (t.Kind() != reflect.Slice && t != reflect.TypeOf(strfmt.DateTime{})) {
+ return data, nil
+ }
+
+ if data == nil {
+ return nil, errors.New("attempted to decode nil data")
+ }
+
+ if t == reflect.TypeOf(strfmt.DateTime{}) {
+ return strfmt.ParseDateTime(data.(string))
+ }
+
+ bytes, err := base64.StdEncoding.DecodeString(data.(string))
+ if err != nil {
+ return []byte{}, fmt.Errorf("failed parsing base64 data: %w", err)
+ }
+ return bytes, nil
+ },
+ Result: output,
+ }
+
+ dec, err := mapstructure.NewDecoder(&cfg)
+ if err != nil {
+ return fmt.Errorf("error initializing decoder: %w", err)
+ }
+
+ return dec.Decode(input)
+}
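+
+// decodeEntryExample is a minimal sketch (hypothetical input and field names)
+// of the hook above: base64 strings decode into []byte targets, and RFC3339
+// strings decode into strfmt.DateTime targets.
+func decodeEntryExample() error {
+ input := map[string]interface{}{
+ "content": "aGVsbG8=", // base64 encoding of "hello"
+ "created": "2021-01-01T00:00:00.000Z", // timestamp as a string
+ }
+ var out struct {
+ Content []byte // populated via base64.StdEncoding by the decode hook
+ Created strfmt.DateTime // populated via strfmt.ParseDateTime by the decode hook
+ }
+ // on success, out.Content == []byte("hello") and out.Created holds the parsed time
+ return DecodeEntry(input, &out)
+}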
+
+// CanonicalizeEntry returns the entry marshalled in JSON according to the
+// canonicalization rules of RFC8785 to protect against any changes in golang's JSON
+// marshalling logic that may reorder elements
+func CanonicalizeEntry(ctx context.Context, entry EntryImpl) ([]byte, error) {
+ canonicalEntry, err := entry.Canonicalize(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return jsoncanonicalizer.Transform(canonicalEntry)
+}
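+
+// canonicalizeExample is a hypothetical sketch of what RFC 8785 guarantees:
+// object keys are sorted and insignificant whitespace is dropped, so documents
+// with the same content always canonicalize to the same bytes.
+func canonicalizeExample() ([]byte, error) {
+ // returns []byte(`{"a":1,"b":2}`) regardless of the input's key order or spacing
+ return jsoncanonicalizer.Transform([]byte(`{ "b": 2, "a": 1 }`))
+}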
+
+// ArtifactProperties provide a consistent struct for passing values from
+// CLI flags to the type+version specific CreateProposeEntry() methods
+type ArtifactProperties struct {
+ AdditionalAuthenticatedData []byte
+ ArtifactPath *url.URL
+ ArtifactHash string
+ ArtifactBytes []byte
+ SignaturePath *url.URL
+ SignatureBytes []byte
+ PublicKeyPaths []*url.URL
+ PublicKeyBytes [][]byte
+ PKIFormat string
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/error.go b/vendor/github.com/sigstore/rekor/pkg/types/error.go
new file mode 100644
index 00000000000..57f0c77518b
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/error.go
@@ -0,0 +1,31 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+// ValidationError indicates that there is an issue with the content in the HTTP Request that
+// should result in an HTTP 400 Bad Request error being returned to the client
+//
+// Deprecated: use InputValidationError instead to take advantage of Go's error wrapping
+type ValidationError error
+
+// InputValidationError indicates that there is an issue with the content in the HTTP Request that
+// should result in an HTTP 400 Bad Request error being returned to the client
+type InputValidationError struct {
+ Err error
+}
+
+func (v *InputValidationError) Error() string { return v.Err.Error() }
+func (v *InputValidationError) Unwrap() error { return v.Err }
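+
+// A minimal sketch (assuming the standard library "errors" and "net/http"
+// packages are imported) of how a handler can act on this wrapped error:
+//
+//	var ive *InputValidationError
+//	if errors.As(err, &ive) {
+//		code = http.StatusBadRequest // reject the submission as malformed
+//	} else {
+//		code = http.StatusInternalServerError
+//	}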
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go
new file mode 100644
index 00000000000..66395c7a05a
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go
@@ -0,0 +1,75 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hashedrekord
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sigstore/rekor/pkg/types"
+)
+
+const (
+ KIND = "hashedrekord"
+)
+
+type BaseRekordType struct {
+ types.RekorType
+}
+
+func init() {
+ types.TypeMap.Store(KIND, New)
+}
+
+func New() types.TypeImpl {
+ brt := BaseRekordType{}
+ brt.Kind = KIND
+ brt.VersionMap = VersionMap
+ return &brt
+}
+
+var VersionMap = types.NewSemVerEntryFactoryMap()
+
+func (rt BaseRekordType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) {
+ if pe == nil {
+ return nil, errors.New("proposed entry cannot be nil")
+ }
+
+ rekord, ok := pe.(*models.Hashedrekord)
+ if !ok {
+ return nil, fmt.Errorf("cannot unmarshal non-hashed Rekord types: %s", pe.Kind())
+ }
+
+ return rt.VersionedUnmarshal(rekord, *rekord.APIVersion)
+}
+
+func (rt *BaseRekordType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) {
+ if version == "" {
+ version = rt.DefaultVersion()
+ }
+ ei, err := rt.VersionedUnmarshal(nil, version)
+ if err != nil {
+ return nil, fmt.Errorf("fetching hashed Rekord version implementation: %w", err)
+ }
+
+ return ei.CreateFromArtifactProperties(ctx, props)
+}
+
+func (rt BaseRekordType) DefaultVersion() string {
+ return "0.0.1"
+}
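+
+// proposeExample is a minimal sketch (not part of the upstream API) of how the
+// registration above is consumed: types.NewProposedEntry looks KIND up in
+// types.TypeMap and dispatches to CreateProposedEntry. An empty version picks
+// DefaultVersion (0.0.1); sig and pub are assumed inputs, and the versioned
+// package (.../hashedrekord/v0.0.1) must be imported for its init() side effect.
+func proposeExample(ctx context.Context, sig, pub []byte) (models.ProposedEntry, error) {
+ props := types.ArtifactProperties{
+ // hex digest the signature is assumed to cover (here: SHA-256 of empty input)
+ ArtifactHash: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ SignatureBytes: sig,
+ PublicKeyBytes: [][]byte{pub},
+ PKIFormat: "x509",
+ }
+ return types.NewProposedEntry(ctx, KIND, "", props)
+}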
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json
new file mode 100644
index 00000000000..be9beaeb6a2
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord_schema.json
@@ -0,0 +1,12 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://rekor.sigstore.dev/types/hashedrekord/hasehedrekord_schema.json",
+ "title": "Hashedrekord Schema",
+ "description": "Schema for Hashedrekord objects",
+ "type": "object",
+ "oneOf": [
+ {
+ "$ref": "v0.0.1/hashedrekord_v0_0_1_schema.json"
+ }
+ ]
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go
new file mode 100644
index 00000000000..7d0ef04e61f
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go
@@ -0,0 +1,377 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hashedrekord
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag/conv"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sigstore/rekor/pkg/internal/log"
+ pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes"
+ "github.com/sigstore/rekor/pkg/pki/x509"
+ "github.com/sigstore/rekor/pkg/types"
+ hashedrekord "github.com/sigstore/rekor/pkg/types/hashedrekord"
+ "github.com/sigstore/rekor/pkg/util"
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+const (
+ APIVERSION = "0.0.1"
+)
+
+func init() {
+ if err := hashedrekord.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil {
+ log.Logger.Panic(err)
+ }
+}
+
+type V001Entry struct {
+ HashedRekordObj models.HashedrekordV001Schema
+}
+
+func (v V001Entry) APIVersion() string {
+ return APIVERSION
+}
+
+func NewEntry() types.EntryImpl {
+ return &V001Entry{}
+}
+
+func (v V001Entry) IndexKeys() ([]string, error) {
+ var result []string
+
+ key := v.HashedRekordObj.Signature.PublicKey.Content
+ keyHash := sha256.Sum256(key)
+ result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:])))
+
+ pub, err := x509.NewPublicKey(bytes.NewReader(key))
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, pub.Subjects()...)
+
+ if v.HashedRekordObj.Data.Hash != nil {
+ hashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.HashedRekordObj.Data.Hash.Algorithm, *v.HashedRekordObj.Data.Hash.Value))
+ result = append(result, hashKey)
+ }
+
+ return result, nil
+}
+
+// DecodeEntry decodes the input directly into the provided output pointer,
+// leaving the output unmodified if an error occurs.
+func DecodeEntry(input any, output *models.HashedrekordV001Schema) error {
+ if output == nil {
+ return fmt.Errorf("nil output *models.HashedrekordV001Schema")
+ }
+ var m models.HashedrekordV001Schema
+ // Single type-switch with map fast path
+ switch data := input.(type) {
+ case map[string]any:
+ mm := data
+ if sigRaw, ok := mm["signature"].(map[string]any); ok {
+ m.Signature = &models.HashedrekordV001SchemaSignature{}
+ if c, ok := sigRaw["content"].(string); ok && c != "" {
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(c)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(c))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for signature content: %w", err)
+ }
+ m.Signature.Content = outb[:n]
+ }
+ if pkRaw, ok := sigRaw["publicKey"].(map[string]any); ok {
+ m.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{}
+ if c, ok := pkRaw["content"].(string); ok && c != "" {
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(c)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(c))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for public key content: %w", err)
+ }
+ m.Signature.PublicKey.Content = outb[:n]
+ }
+ }
+ }
+ if dataRaw, ok := mm["data"].(map[string]any); ok {
+ if hRaw, ok := dataRaw["hash"].(map[string]any); ok {
+ m.Data = &models.HashedrekordV001SchemaData{Hash: &models.HashedrekordV001SchemaDataHash{}}
+ if alg, ok := hRaw["algorithm"].(string); ok {
+ m.Data.Hash.Algorithm = &alg
+ }
+ if val, ok := hRaw["value"].(string); ok {
+ m.Data.Hash.Value = &val
+ }
+ }
+ }
+ *output = m
+ return nil
+ case *models.HashedrekordV001Schema:
+ if data == nil {
+ return fmt.Errorf("nil *models.HashedrekordV001Schema")
+ }
+ *output = *data
+ return nil
+ case models.HashedrekordV001Schema:
+ *output = data
+ return nil
+ default:
+ return fmt.Errorf("unsupported input type %T for DecodeEntry", input)
+ }
+}
+
+func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error {
+ rekord, ok := pe.(*models.Hashedrekord)
+ if !ok {
+ return errors.New("cannot unmarshal non Rekord v0.0.1 type")
+ }
+
+ if err := DecodeEntry(rekord.Spec, &v.HashedRekordObj); err != nil {
+ return err
+ }
+
+ // field validation
+ if err := v.HashedRekordObj.Validate(strfmt.Default); err != nil {
+ return err
+ }
+
+ // cross field validation
+ _, _, err := v.validate()
+ return err
+}
+
+func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) {
+ sigObj, keyObj, err := v.validate()
+ if err != nil {
+ return nil, &types.InputValidationError{Err: err}
+ }
+
+ canonicalEntry := models.HashedrekordV001Schema{}
+
+ // need to canonicalize signature & key content
+ canonicalEntry.Signature = &models.HashedrekordV001SchemaSignature{}
+ canonicalEntry.Signature.Content, err = sigObj.CanonicalValue()
+ if err != nil {
+ return nil, err
+ }
+
+ // the key URL (if known) is deliberately not set
+ canonicalEntry.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{}
+ canonicalEntry.Signature.PublicKey.Content, err = keyObj.CanonicalValue()
+ if err != nil {
+ return nil, err
+ }
+
+ canonicalEntry.Data = &models.HashedrekordV001SchemaData{}
+ canonicalEntry.Data.Hash = v.HashedRekordObj.Data.Hash
+ // the data content is deliberately not set
+
+ v.HashedRekordObj = canonicalEntry
+ // wrap in valid object with kind and apiVersion set
+ rekordObj := models.Hashedrekord{}
+ rekordObj.APIVersion = conv.Pointer(APIVERSION)
+ rekordObj.Spec = &canonicalEntry
+
+ return json.Marshal(&rekordObj)
+}
+
+// validate performs cross-field validation for fields in object
+func (v *V001Entry) validate() (pkitypes.Signature, pkitypes.PublicKey, error) {
+ sig := v.HashedRekordObj.Signature
+ if sig == nil {
+ return nil, nil, &types.InputValidationError{Err: errors.New("missing signature")}
+ }
+ // Hashed rekord type only works for x509 signature types
+ sigObj, err := x509.NewSignatureWithOpts(bytes.NewReader(sig.Content), options.WithED25519ph())
+ if err != nil {
+ return nil, nil, &types.InputValidationError{Err: err}
+ }
+
+ key := sig.PublicKey
+ if key == nil {
+ return nil, nil, &types.InputValidationError{Err: errors.New("missing public key")}
+ }
+ keyObj, err := x509.NewPublicKey(bytes.NewReader(key.Content))
+ if err != nil {
+ return nil, nil, &types.InputValidationError{Err: err}
+ }
+
+ data := v.HashedRekordObj.Data
+ if data == nil {
+ return nil, nil, &types.InputValidationError{Err: errors.New("missing data")}
+ }
+
+ hash := data.Hash
+ if hash == nil {
+ return nil, nil, &types.InputValidationError{Err: errors.New("missing hash")}
+ }
+
+ var alg crypto.Hash
+ switch conv.Value(hash.Algorithm) {
+ case models.HashedrekordV001SchemaDataHashAlgorithmSha384:
+ if len(*hash.Value) != crypto.SHA384.Size()*2 {
+ return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")}
+ }
+ alg = crypto.SHA384
+ case models.HashedrekordV001SchemaDataHashAlgorithmSha512:
+ if len(*hash.Value) != crypto.SHA512.Size()*2 {
+ return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")}
+ }
+ alg = crypto.SHA512
+ default:
+ if len(*hash.Value) != crypto.SHA256.Size()*2 {
+ return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")}
+ }
+ alg = crypto.SHA256
+ }
+
+ decoded, err := hex.DecodeString(*hash.Value)
+ if err != nil {
+ return nil, nil, err
+ }
+ if err := sigObj.Verify(nil, keyObj, options.WithDigest(decoded), options.WithCryptoSignerOpts(alg)); err != nil {
+ return nil, nil, &types.InputValidationError{Err: fmt.Errorf("verifying signature: %w", err)}
+ }
+
+ return sigObj, keyObj, nil
+}
+
+func getDataHashAlgorithm(hashAlgorithm crypto.Hash) string {
+ switch hashAlgorithm {
+ case crypto.SHA384:
+ return models.HashedrekordV001SchemaDataHashAlgorithmSha384
+ case crypto.SHA512:
+ return models.HashedrekordV001SchemaDataHashAlgorithmSha512
+ default:
+ return models.HashedrekordV001SchemaDataHashAlgorithmSha256
+ }
+}
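+
+// hashLengthExample is a hypothetical sketch of the invariant validate()
+// enforces above: the digest string must be lowercase hex of exactly twice the
+// digest size in bytes (sha256: 32 bytes / 64 chars; sha384: 96; sha512: 128).
+func hashLengthExample(artifact []byte) string {
+ digest := sha256.Sum256(artifact)
+ return hex.EncodeToString(digest[:]) // length == crypto.SHA256.Size()*2 == 64
+}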
+
+func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) {
+ returnVal := models.Hashedrekord{}
+ re := V001Entry{}
+
+ // we will need artifact, public-key, signature
+ re.HashedRekordObj.Data = &models.HashedrekordV001SchemaData{}
+
+ var err error
+
+ if props.PKIFormat != "x509" {
+ return nil, errors.New("hashedrekord entries can only be created for artifacts signed with x509-based PKI")
+ }
+
+ re.HashedRekordObj.Signature = &models.HashedrekordV001SchemaSignature{}
+ sigBytes := props.SignatureBytes
+ if sigBytes == nil {
+ if props.SignaturePath == nil {
+ return nil, errors.New("a detached signature must be provided")
+ }
+ sigBytes, err = os.ReadFile(filepath.Clean(props.SignaturePath.Path))
+ if err != nil {
+ return nil, fmt.Errorf("error reading signature file: %w", err)
+ }
+ }
+ re.HashedRekordObj.Signature.Content = strfmt.Base64(sigBytes)
+
+ re.HashedRekordObj.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{}
+ publicKeyBytes := props.PublicKeyBytes
+ if len(publicKeyBytes) == 0 {
+ if len(props.PublicKeyPaths) != 1 {
+ return nil, errors.New("only one public key must be provided to verify detached signature")
+ }
+ keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path))
+ if err != nil {
+ return nil, fmt.Errorf("error reading public key file: %w", err)
+ }
+ publicKeyBytes = append(publicKeyBytes, keyBytes)
+ } else if len(publicKeyBytes) != 1 {
+ return nil, errors.New("only one public key must be provided")
+ }
+
+ hashAlgorithm, hashValue := util.UnprefixSHA(props.ArtifactHash)
+ re.HashedRekordObj.Signature.PublicKey.Content = strfmt.Base64(publicKeyBytes[0])
+ re.HashedRekordObj.Data.Hash = &models.HashedrekordV001SchemaDataHash{
+ Algorithm: conv.Pointer(getDataHashAlgorithm(hashAlgorithm)),
+ Value: conv.Pointer(hashValue),
+ }
+
+ if _, _, err := re.validate(); err != nil {
+ return nil, err
+ }
+
+ returnVal.APIVersion = conv.Pointer(re.APIVersion())
+ returnVal.Spec = re.HashedRekordObj
+
+ return &returnVal, nil
+}
+
+func (v V001Entry) Verifiers() ([]pkitypes.PublicKey, error) {
+ if v.HashedRekordObj.Signature == nil || v.HashedRekordObj.Signature.PublicKey == nil || v.HashedRekordObj.Signature.PublicKey.Content == nil {
+ return nil, errors.New("hashedrekord v0.0.1 entry not initialized")
+ }
+ key, err := x509.NewPublicKey(bytes.NewReader(v.HashedRekordObj.Signature.PublicKey.Content))
+ if err != nil {
+ return nil, err
+ }
+ return []pkitypes.PublicKey{key}, nil
+}
+
+func (v V001Entry) ArtifactHash() (string, error) {
+ if v.HashedRekordObj.Data == nil || v.HashedRekordObj.Data.Hash == nil || v.HashedRekordObj.Data.Hash.Value == nil || v.HashedRekordObj.Data.Hash.Algorithm == nil {
+ return "", errors.New("hashedrekord v0.0.1 entry not initialized")
+ }
+ return strings.ToLower(fmt.Sprintf("%s:%s", *v.HashedRekordObj.Data.Hash.Algorithm, *v.HashedRekordObj.Data.Hash.Value)), nil
+}
+
+func (v V001Entry) Insertable() (bool, error) {
+ if v.HashedRekordObj.Signature == nil {
+ return false, errors.New("missing signature property")
+ }
+ if len(v.HashedRekordObj.Signature.Content) == 0 {
+ return false, errors.New("missing signature content")
+ }
+ if v.HashedRekordObj.Signature.PublicKey == nil {
+ return false, errors.New("missing publicKey property")
+ }
+ if len(v.HashedRekordObj.Signature.PublicKey.Content) == 0 {
+ return false, errors.New("missing publicKey content")
+ }
+ if v.HashedRekordObj.Data == nil {
+ return false, errors.New("missing data property")
+ }
+ if v.HashedRekordObj.Data.Hash == nil {
+ return false, errors.New("missing hash property")
+ }
+ if v.HashedRekordObj.Data.Hash.Algorithm == nil {
+ return false, errors.New("missing hash algorithm")
+ }
+ if v.HashedRekordObj.Data.Hash.Value == nil {
+ return false, errors.New("missing hash value")
+ }
+ return true, nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json
new file mode 100644
index 00000000000..3d536eb49e1
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json
@@ -0,0 +1,54 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://rekor.sigstore.dev/types/rekord/hashedrekord_v0_0_1_schema.json",
+ "title": "Hashed Rekor v0.0.1 Schema",
+ "description": "Schema for Hashed Rekord object",
+ "type": "object",
+ "properties": {
+ "signature": {
+ "description": "Information about the detached signature associated with the entry",
+ "type": "object",
+ "properties": {
+ "content": {
+ "description": "Specifies the content of the signature inline within the document",
+ "type": "string",
+ "format": "byte"
+ },
+ "publicKey" : {
+ "description": "The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information",
+ "type": "object",
+ "properties": {
+ "content": {
+ "description": "Specifies the content of the public key or code signing certificate inline within the document",
+ "type": "string",
+ "format": "byte"
+ }
+ }
+ }
+ }
+ },
+ "data": {
+ "description": "Information about the content associated with the entry",
+ "type": "object",
+ "properties": {
+ "hash": {
+ "description": "Specifies the hash algorithm and value for the content",
+ "type": "object",
+ "properties": {
+ "algorithm": {
+ "description": "The hashing function used to compute the hash value",
+ "type": "string",
+ "enum": [ "sha256", "sha384", "sha512" ]
+ },
+ "value": {
+ "description": "The hash value for the content, as represented by a lower case hexadecimal string",
+ "type": "string"
+ }
+ },
+ "required": [ "algorithm", "value" ]
+ }
+ }
+ }
+ },
+ "required": [ "signature", "data" ]
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md b/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md
new file mode 100644
index 00000000000..0c4d1d73faf
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md
@@ -0,0 +1,13 @@
+**in-toto Type Data Documentation**
+
+This document provides a definition for each field that is not otherwise described in the [in-toto schema](https://github.com/sigstore/rekor/blob/main/pkg/types/intoto/v0.0.1/intoto_v0_0_1_schema.json). This document also notes any additional information about the values associated with each field such as the format in which the data is stored and any necessary transformations.
+
+**Attestation:** authenticated, machine-readable metadata about one or more software artifacts. [SLSA definition](https://github.com/slsa-framework/slsa/blob/main/controls/attestations.md)
+- The Attestation value ought to be a Base64-encoded JSON object.
+- The [in-toto Attestation specification](https://github.com/in-toto/attestation/blob/main/spec/README.md#statement) provides detailed guidance on understanding and parsing this JSON object.
+
+**AttestationType:** Identifies the type of attestation being made, such as a provenance attestation or a vulnerability scan attestation. Note that AttestationType's value, even when it begins with "http", is not necessarily a resolvable URL.
+
+**How do you identify an object as an in-toto object?**
+
+The "Body" field will include an "IntotoObj" field.
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go
new file mode 100644
index 00000000000..2bfba39468b
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go
@@ -0,0 +1,135 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package intoto
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "slices"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sigstore/rekor/pkg/internal/log"
+ "github.com/sigstore/rekor/pkg/types"
+)
+
+const (
+ KIND = "intoto"
+)
+
+type BaseIntotoType struct {
+ types.RekorType
+}
+
+func init() {
+ types.TypeMap.Store(KIND, New)
+}
+
+func New() types.TypeImpl {
+ bit := BaseIntotoType{}
+ bit.Kind = KIND
+ bit.VersionMap = VersionMap
+ return &bit
+}
+
+var VersionMap = types.NewSemVerEntryFactoryMap()
+
+func (it BaseIntotoType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) {
+ if pe == nil {
+ return nil, errors.New("proposed entry cannot be nil")
+ }
+
+ in, ok := pe.(*models.Intoto)
+ if !ok {
+ return nil, errors.New("cannot unmarshal non-Rekord types")
+ }
+
+ return it.VersionedUnmarshal(in, *in.APIVersion)
+}
+
+func (it *BaseIntotoType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) {
+ var head ProposedIntotoEntryIterator
+ var next *ProposedIntotoEntryIterator
+ if version == "" {
+ // get default version as head of list
+ version = it.DefaultVersion()
+ ei, err := it.VersionedUnmarshal(nil, version)
+ if err != nil {
+ return nil, fmt.Errorf("fetching default Intoto version implementation: %w", err)
+ }
+ pe, err := ei.CreateFromArtifactProperties(ctx, props)
+ if err != nil {
+ return nil, fmt.Errorf("creating default Intoto entry: %w", err)
+ }
+ head.ProposedEntry = pe
+ next = &head
+ for _, v := range it.SupportedVersions() {
+ if v == it.DefaultVersion() {
+ continue
+ }
+ ei, err := it.VersionedUnmarshal(nil, v)
+ if err != nil {
+ log.Logger.Errorf("fetching Intoto version (%v) implementation: %w", v, err)
+ continue
+ }
+ versionedPE, err := ei.CreateFromArtifactProperties(ctx, props)
+ if err != nil {
+ log.Logger.Errorf("error creating Intoto entry of version (%v): %w", v, err)
+ continue
+ }
+ next.next = &ProposedIntotoEntryIterator{versionedPE, nil}
+ next = next.next.(*ProposedIntotoEntryIterator)
+ }
+ return head, nil
+ }
+
+ ei, err := it.VersionedUnmarshal(nil, version)
+ if err != nil {
+ return nil, fmt.Errorf("fetching Intoto version implementation: %w", err)
+ }
+ return ei.CreateFromArtifactProperties(ctx, props)
+}
+
+func (it BaseIntotoType) DefaultVersion() string {
+ return "0.0.2"
+}
+
+// SupportedVersions returns the supported versions for this type in the order of preference
+func (it BaseIntotoType) SupportedVersions() []string {
+ return []string{"0.0.2", "0.0.1"}
+}
+
+// IsSupportedVersion returns true if the version can be inserted into the log, and false if not
+func (it *BaseIntotoType) IsSupportedVersion(proposedVersion string) bool {
+ return slices.Contains(it.SupportedVersions(), proposedVersion)
+}
+
+type ProposedIntotoEntryIterator struct {
+ models.ProposedEntry
+ next models.ProposedEntry
+}
+
+func (p ProposedIntotoEntryIterator) HasNext() bool {
+ return p.next != nil
+}
+
+func (p ProposedIntotoEntryIterator) GetNext() models.ProposedEntry {
+ return p.next
+}
+
+func (p ProposedIntotoEntryIterator) Get() models.ProposedEntry {
+ return p.ProposedEntry
+}
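+
+// iterateExample is a minimal sketch (not part of the upstream API) of
+// consuming the iterator returned when no version is requested: entries come
+// back in SupportedVersions() preference order, so a caller can fall back to
+// the next version if the log rejects the current one. submit is an assumed
+// callback provided by the caller.
+func iterateExample(ctx context.Context, it *BaseIntotoType, props types.ArtifactProperties, submit func(models.ProposedEntry) error) error {
+ pe, err := it.CreateProposedEntry(ctx, "", props)
+ if err != nil {
+ return err
+ }
+ for {
+ iter, ok := pe.(types.ProposedEntryIterator)
+ if !ok {
+ return submit(pe) // a single entry was returned; no fallback list
+ }
+ if err := submit(iter.Get()); err == nil {
+ return nil
+ }
+ if !iter.HasNext() {
+ return errors.New("no supported intoto version was accepted")
+ }
+ pe = iter.GetNext()
+ }
+}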
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto_schema.json
new file mode 100644
index 00000000000..16f6172afa7
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto_schema.json
@@ -0,0 +1,15 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://rekor.sigstore.dev/types/intoto/intoto_schema.json",
+ "title": "Intoto Schema",
+ "description": "Intoto for Rekord objects",
+ "type": "object",
+ "oneOf": [
+ {
+ "$ref": "v0.0.1/intoto_v0_0_1_schema.json"
+ },
+ {
+ "$ref": "v0.0.2/intoto_v0_0_2_schema.json"
+ }
+ ]
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/entry.go
new file mode 100644
index 00000000000..ef895151c33
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/entry.go
@@ -0,0 +1,648 @@
+//
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package intoto
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "github.com/in-toto/in-toto-golang/in_toto"
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag/conv"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sigstore/rekor/pkg/internal/log"
+ pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes"
+ "github.com/sigstore/rekor/pkg/pki/x509"
+ "github.com/sigstore/rekor/pkg/types"
+ "github.com/sigstore/rekor/pkg/types/intoto"
+ "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+const (
+ APIVERSION = "0.0.2"
+)
+
+func init() {
+ if err := intoto.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil {
+ log.Logger.Panic(err)
+ }
+}
+
+var maxAttestationSize = 100 * 1024
+
+func SetMaxAttestationSize(limit int) {
+ maxAttestationSize = limit
+}
+
+type V002Entry struct {
+ IntotoObj models.IntotoV002Schema
+ env dsse.Envelope
+}
+
+func (v V002Entry) APIVersion() string {
+ return APIVERSION
+}
+
+func NewEntry() types.EntryImpl {
+ return &V002Entry{}
+}
+
+func (v V002Entry) IndexKeys() ([]string, error) {
+ var result []string
+
+ if v.IntotoObj.Content == nil || v.IntotoObj.Content.Envelope == nil {
+ log.Logger.Info("IntotoObj content or dsse envelope is nil")
+ return result, nil
+ }
+
+ for _, sig := range v.IntotoObj.Content.Envelope.Signatures {
+ if sig == nil || sig.PublicKey == nil {
+ return result, errors.New("malformed or missing signature")
+ }
+ keyObj, err := x509.NewPublicKey(bytes.NewReader(*sig.PublicKey))
+ if err != nil {
+ return result, err
+ }
+
+ canonKey, err := keyObj.CanonicalValue()
+ if err != nil {
+ return result, fmt.Errorf("could not canonicize key: %w", err)
+ }
+
+ keyHash := sha256.Sum256(canonKey)
+ result = append(result, "sha256:"+hex.EncodeToString(keyHash[:]))
+
+ result = append(result, keyObj.Subjects()...)
+ }
+
+ payloadKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value))
+ result = append(result, payloadKey)
+
+ // since we can't deterministically calculate this server-side (due to public keys being added inline, and also canonicalization being potentially different),
+ // we'll just skip adding this index key
+ // hashkey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.Hash.Algorithm, *v.IntotoObj.Content.Hash.Value))
+ // result = append(result, hashkey)
+
+ switch *v.IntotoObj.Content.Envelope.PayloadType {
+ case in_toto.PayloadType:
+
+ if v.IntotoObj.Content.Envelope.Payload == nil {
+ log.Logger.Info("IntotoObj DSSE payload is empty")
+ return result, nil
+ }
+ decodedPayload, err := base64.StdEncoding.DecodeString(string(v.IntotoObj.Content.Envelope.Payload))
+ if err != nil {
+ return result, fmt.Errorf("could not decode envelope payload: %w", err)
+ }
+ statement, err := parseStatement(decodedPayload)
+ if err != nil {
+ return result, err
+ }
+ for _, s := range statement.Subject {
+ for alg, ds := range s.Digest {
+ result = append(result, alg+":"+ds)
+ }
+ }
+ // Not all in-toto statements will contain a SLSA provenance predicate.
+ // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate
+ // for other predicates.
+ if predicate, err := parseSlsaPredicate(decodedPayload); err == nil {
+ if predicate.Predicate.Materials != nil {
+ for _, s := range predicate.Predicate.Materials {
+ for alg, ds := range s.Digest {
+ result = append(result, alg+":"+ds)
+ }
+ }
+ }
+ }
+ default:
+ log.Logger.Infof("Unknown in_toto DSSE envelope Type: %s", *v.IntotoObj.Content.Envelope.PayloadType)
+ }
+ return result, nil
+}
+
+func parseStatement(p []byte) (*in_toto.Statement, error) {
+ ps := in_toto.Statement{}
+ if err := json.Unmarshal(p, &ps); err != nil {
+ return nil, err
+ }
+ return &ps, nil
+}
+
+func parseSlsaPredicate(p []byte) (*in_toto.ProvenanceStatement, error) {
+ predicate := in_toto.ProvenanceStatement{}
+ if err := json.Unmarshal(p, &predicate); err != nil {
+ return nil, err
+ }
+ return &predicate, nil
+}
+
+// DecodeEntry decodes the input directly into the provided output pointer,
+// leaving the output unmodified if an error occurs.
+func DecodeEntry(input any, output *models.IntotoV002Schema) error {
+ if output == nil {
+ return fmt.Errorf("nil output *models.IntotoV002Schema")
+ }
+ var m models.IntotoV002Schema
+ switch in := input.(type) {
+ case map[string]any:
+ mm := in
+ m.Content = &models.IntotoV002SchemaContent{Envelope: &models.IntotoV002SchemaContentEnvelope{}}
+ if c, ok := mm["content"].(map[string]any); ok {
+ if env, ok := c["envelope"].(map[string]any); ok {
+ if pt, ok := env["payloadType"].(string); ok {
+ m.Content.Envelope.PayloadType = &pt
+ }
+ if p, ok := env["payload"].(string); ok && p != "" {
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(p)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(p))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for payload: %w", err)
+ }
+ m.Content.Envelope.Payload = strfmt.Base64(outb[:n])
+ }
+ if raw, ok := env["signatures"]; ok {
+ switch sigs := raw.(type) {
+ case []any:
+ m.Content.Envelope.Signatures = make([]*models.IntotoV002SchemaContentEnvelopeSignaturesItems0, 0, len(sigs))
+ for _, s := range sigs {
+ if sm, ok := s.(map[string]any); ok {
+ item := &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{}
+ if kid, ok := sm["keyid"].(string); ok {
+ item.Keyid = kid
+ }
+ if sig, ok := sm["sig"].(string); ok {
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(sig)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(sig))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for signature: %w", err)
+ }
+ b := strfmt.Base64(outb[:n])
+ item.Sig = &b
+ }
+ if pk, ok := sm["publicKey"].(string); ok {
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(pk)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(pk))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for public key: %w", err)
+ }
+ b := strfmt.Base64(outb[:n])
+ item.PublicKey = &b
+ }
+ m.Content.Envelope.Signatures = append(m.Content.Envelope.Signatures, item)
+ }
+ }
+ case []map[string]any:
+ m.Content.Envelope.Signatures = make([]*models.IntotoV002SchemaContentEnvelopeSignaturesItems0, 0, len(sigs))
+ for _, sm := range sigs {
+ item := &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{}
+ if kid, ok := sm["keyid"].(string); ok {
+ item.Keyid = kid
+ }
+ if sig, ok := sm["sig"].(string); ok {
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(sig)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(sig))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for signature: %w", err)
+ }
+ b := strfmt.Base64(outb[:n])
+ item.Sig = &b
+ }
+ if pk, ok := sm["publicKey"].(string); ok {
+ outb := make([]byte, base64.StdEncoding.DecodedLen(len(pk)))
+ n, err := base64.StdEncoding.Decode(outb, []byte(pk))
+ if err != nil {
+ return fmt.Errorf("failed parsing base64 data for public key: %w", err)
+ }
+ b := strfmt.Base64(outb[:n])
+ item.PublicKey = &b
+ }
+ m.Content.Envelope.Signatures = append(m.Content.Envelope.Signatures, item)
+ }
+ }
+ }
+ }
+ if h, ok := c["hash"].(map[string]any); ok {
+ m.Content.Hash = &models.IntotoV002SchemaContentHash{}
+ if alg, ok := h["algorithm"].(string); ok {
+ m.Content.Hash.Algorithm = &alg
+ }
+ if val, ok := h["value"].(string); ok {
+ m.Content.Hash.Value = &val
+ }
+ }
+ if ph, ok := c["payloadHash"].(map[string]any); ok {
+ m.Content.PayloadHash = &models.IntotoV002SchemaContentPayloadHash{}
+ if alg, ok := ph["algorithm"].(string); ok {
+ m.Content.PayloadHash.Algorithm = &alg
+ }
+ if val, ok := ph["value"].(string); ok {
+ m.Content.PayloadHash.Value = &val
+ }
+ }
+ }
+ *output = m
+ return nil
+ case *models.IntotoV002Schema:
+ if in == nil {
+ return fmt.Errorf("nil *models.IntotoV002Schema")
+ }
+ *output = *in
+ return nil
+ case models.IntotoV002Schema:
+ *output = in
+ return nil
+ default:
+ return fmt.Errorf("unsupported input type %T for DecodeEntry", input)
+ }
+}
+
+func (v *V002Entry) Unmarshal(pe models.ProposedEntry) error {
+ it, ok := pe.(*models.Intoto)
+ if !ok {
+ return errors.New("cannot unmarshal non Intoto v0.0.2 type")
+ }
+
+ if err := DecodeEntry(it.Spec, &v.IntotoObj); err != nil {
+ return err
+ }
+
+ // field validation
+ if err := v.IntotoObj.Validate(strfmt.Default); err != nil {
+ return err
+ }
+
+ if string(v.IntotoObj.Content.Envelope.Payload) == "" {
+ return nil
+ }
+
+ env := &dsse.Envelope{
+ Payload: string(v.IntotoObj.Content.Envelope.Payload),
+ PayloadType: *v.IntotoObj.Content.Envelope.PayloadType,
+ }
+
+ allPubKeyBytes := make([][]byte, 0)
+ sigs := v.IntotoObj.Content.Envelope.Signatures[:0]
+ for _, sig := range v.IntotoObj.Content.Envelope.Signatures {
+ if sig == nil {
+ // drop malformed (nil) signature entries instead of deleting mid-iteration
+ continue
+ }
+ sigs = append(sigs, sig)
+ env.Signatures = append(env.Signatures, dsse.Signature{
+ KeyID: sig.Keyid,
+ Sig: string(*sig.Sig),
+ })
+
+ allPubKeyBytes = append(allPubKeyBytes, *sig.PublicKey)
+ }
+ v.IntotoObj.Content.Envelope.Signatures = sigs
+
+ if _, err := verifyEnvelope(allPubKeyBytes, env); err != nil {
+ return err
+ }
+
+ v.env = *env
+
+ decodedPayload, err := base64.StdEncoding.DecodeString(string(v.IntotoObj.Content.Envelope.Payload))
+ if err != nil {
+ return fmt.Errorf("could not decode envelope payload: %w", err)
+ }
+
+ h := sha256.Sum256(decodedPayload)
+ v.IntotoObj.Content.PayloadHash = &models.IntotoV002SchemaContentPayloadHash{
+ Algorithm: conv.Pointer(models.IntotoV002SchemaContentPayloadHashAlgorithmSha256),
+ Value: conv.Pointer(hex.EncodeToString(h[:])),
+ }
+
+ return nil
+}
+
+func (v *V002Entry) Canonicalize(_ context.Context) ([]byte, error) {
+ if err := v.IntotoObj.Validate(strfmt.Default); err != nil {
+ return nil, err
+ }
+
+ if v.IntotoObj.Content.Hash == nil {
+ return nil, errors.New("missing envelope digest")
+ }
+
+ if err := v.IntotoObj.Content.Hash.Validate(strfmt.Default); err != nil {
+ return nil, fmt.Errorf("error validating envelope digest: %w", err)
+ }
+
+ if v.IntotoObj.Content.PayloadHash == nil {
+ return nil, errors.New("missing payload digest")
+ }
+
+ if err := v.IntotoObj.Content.PayloadHash.Validate(strfmt.Default); err != nil {
+ return nil, fmt.Errorf("error validating payload digest: %w", err)
+ }
+
+ if len(v.IntotoObj.Content.Envelope.Signatures) == 0 {
+ return nil, errors.New("missing signatures")
+ }
+
+ canonicalEntry := models.IntotoV002Schema{
+ Content: &models.IntotoV002SchemaContent{
+ Envelope: &models.IntotoV002SchemaContentEnvelope{
+ PayloadType: v.IntotoObj.Content.Envelope.PayloadType,
+ Signatures: v.IntotoObj.Content.Envelope.Signatures,
+ },
+ Hash: v.IntotoObj.Content.Hash,
+ PayloadHash: v.IntotoObj.Content.PayloadHash,
+ },
+ }
+ itObj := models.Intoto{}
+ itObj.APIVersion = conv.Pointer(APIVERSION)
+ itObj.Spec = &canonicalEntry
+
+ return json.Marshal(&itObj)
+}
+
+// AttestationKey returns the digest of the attestation that was uploaded, to be used to lookup the attestation from storage
+func (v *V002Entry) AttestationKey() string {
+ if v.IntotoObj.Content != nil && v.IntotoObj.Content.PayloadHash != nil {
+ return fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value)
+ }
+ return ""
+}
+
+// AttestationKeyValue returns both the key and value to be persisted into attestation storage
+func (v *V002Entry) AttestationKeyValue() (string, []byte) {
+ storageSize := base64.StdEncoding.DecodedLen(len(v.env.Payload))
+ if storageSize > maxAttestationSize {
+ log.Logger.Infof("Skipping attestation storage, size %d is greater than max %d", storageSize, maxAttestationSize)
+ return "", nil
+ }
+ attBytes, err := base64.StdEncoding.DecodeString(v.env.Payload)
+ if err != nil {
+ log.Logger.Infof("could not decode envelope payload: %w", err)
+ return "", nil
+ }
+ return v.AttestationKey(), attBytes
+}
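+
+// attestationStorageExample is a hypothetical sketch of consuming the pair
+// above: a nil value means the payload exceeded maxAttestationSize (or failed
+// to decode) and must not be persisted; store is an assumed storage callback.
+func attestationStorageExample(v *V002Entry, store func(key string, val []byte) error) error {
+ key, att := v.AttestationKeyValue()
+ if att == nil {
+ return nil // skipped: the log entry is still valid without a stored attestation
+ }
+ return store(key, att) // key is "sha256:<hex digest of the decoded payload>"
+}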
+
+type verifier struct {
+ s signature.Signer
+ v signature.Verifier
+}
+
+func (v *verifier) KeyID() (string, error) {
+ return "", nil
+}
+
+func (v *verifier) Public() crypto.PublicKey {
+ // the dsse library uses this to generate a key ID if the KeyID function returns an empty string
+ // as well as for the AcceptedKey return value. Unfortunately, since key ids can be arbitrary, we don't
+ // know how to generate a matching id for the key id on the envelope's signature...
+ // dsse verify will skip verifiers whose key id doesn't match the signature's key id, unless it fails
+ // to generate one from the public key... so we trick it by returning nil ¯\_(ツ)_/¯
+ return nil
+}
+
+func (v *verifier) Sign(_ context.Context, data []byte) (sig []byte, err error) {
+ if v.s == nil {
+ return nil, errors.New("nil signer")
+ }
+ sig, err = v.s.SignMessage(bytes.NewReader(data), options.WithCryptoSignerOpts(crypto.SHA256))
+ if err != nil {
+ return nil, err
+ }
+ return sig, nil
+}
+
+func (v *verifier) Verify(_ context.Context, data, sig []byte) error {
+ if v.v == nil {
+ return errors.New("nil verifier")
+ }
+ return v.v.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
+}
+
+func (v V002Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) {
+ returnVal := models.Intoto{}
+ re := V002Entry{
+ IntotoObj: models.IntotoV002Schema{
+ Content: &models.IntotoV002SchemaContent{
+ Envelope: &models.IntotoV002SchemaContentEnvelope{},
+ },
+ }}
+ var err error
+ artifactBytes := props.ArtifactBytes
+ if artifactBytes == nil {
+ if props.ArtifactPath == nil {
+ return nil, errors.New("path to artifact file must be specified")
+ }
+ if props.ArtifactPath.IsAbs() {
+ return nil, errors.New("intoto envelopes cannot be fetched over HTTP(S)")
+ }
+ artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ env := dsse.Envelope{}
+ if err := json.Unmarshal(artifactBytes, &env); err != nil {
+ return nil, fmt.Errorf("payload must be a valid dsse envelope: %w", err)
+ }
+
+ allPubKeyBytes := make([][]byte, 0)
+ if len(props.PublicKeyBytes) > 0 {
+ allPubKeyBytes = append(allPubKeyBytes, props.PublicKeyBytes...)
+ }
+
+ if len(props.PublicKeyPaths) > 0 {
+ for _, path := range props.PublicKeyPaths {
+ if path.IsAbs() {
+ return nil, errors.New("dsse public keys cannot be fetched over HTTP(S)")
+ }
+
+ publicKeyBytes, err := os.ReadFile(filepath.Clean(path.Path))
+ if err != nil {
+ return nil, fmt.Errorf("error reading public key file: %w", err)
+ }
+
+ allPubKeyBytes = append(allPubKeyBytes, publicKeyBytes)
+ }
+ }
+
+ keysBySig, err := verifyEnvelope(allPubKeyBytes, &env)
+ if err != nil {
+ return nil, err
+ }
+
+ b64 := strfmt.Base64([]byte(env.Payload))
+ re.IntotoObj.Content.Envelope.Payload = b64
+ re.IntotoObj.Content.Envelope.PayloadType = &env.PayloadType
+
+ for _, sig := range env.Signatures {
+ key, ok := keysBySig[sig.Sig]
+ if !ok {
+ return nil, errors.New("all signatures must have a key that verifies it")
+ }
+
+ canonKey, err := key.CanonicalValue()
+ if err != nil {
+ return nil, fmt.Errorf("could not canonicize key: %w", err)
+ }
+
+ keyBytes := strfmt.Base64(canonKey)
+ sigBytes := strfmt.Base64([]byte(sig.Sig))
+ re.IntotoObj.Content.Envelope.Signatures = append(re.IntotoObj.Content.Envelope.Signatures, &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{
+ Keyid: sig.KeyID,
+ Sig: &sigBytes,
+ PublicKey: &keyBytes,
+ })
+ }
+
+ h := sha256.Sum256(artifactBytes)
+ re.IntotoObj.Content.Hash = &models.IntotoV002SchemaContentHash{
+ Algorithm: conv.Pointer(models.IntotoV002SchemaContentHashAlgorithmSha256),
+ Value: conv.Pointer(hex.EncodeToString(h[:])),
+ }
+
+ returnVal.Spec = re.IntotoObj
+ returnVal.APIVersion = conv.Pointer(re.APIVersion())
+
+ return &returnVal, nil
+}
+
+// verifyEnvelope takes in an array of possible key bytes and attempts to parse them as x509 public keys.
+// it then uses these to verify the envelope and makes sure that every signature on the envelope is verified.
+// it returns a map of verifiers indexed by the signature the verifier corresponds to.
+func verifyEnvelope(allPubKeyBytes [][]byte, env *dsse.Envelope) (map[string]*x509.PublicKey, error) {
+ // generate a fake id for these keys so we can get back to the key bytes and match them to their corresponding signature
+ verifierBySig := make(map[string]*x509.PublicKey)
+ allSigs := make(map[string]struct{})
+ for _, sig := range env.Signatures {
+ allSigs[sig.Sig] = struct{}{}
+ }
+
+ for _, pubKeyBytes := range allPubKeyBytes {
+ key, err := x509.NewPublicKey(bytes.NewReader(pubKeyBytes))
+ if err != nil {
+ return nil, fmt.Errorf("could not parse public key as x509: %w", err)
+ }
+
+ vfr, err := signature.LoadVerifier(key.CryptoPubKey(), crypto.SHA256)
+ if err != nil {
+ return nil, fmt.Errorf("could not load verifier: %w", err)
+ }
+
+ dsseVfr, err := dsse.NewEnvelopeVerifier(&verifier{
+ v: vfr,
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("could not use public key as a dsse verifier: %w", err)
+ }
+
+ accepted, err := dsseVfr.Verify(context.Background(), env)
+ if err != nil {
+ return nil, fmt.Errorf("could not verify envelope: %w", err)
+ }
+
+ for _, accept := range accepted {
+ delete(allSigs, accept.Sig.Sig)
+ verifierBySig[accept.Sig.Sig] = key
+ }
+ }
+
+ if len(allSigs) > 0 {
+ return nil, errors.New("all signatures must have a key that verifies it")
+ }
+
+ return verifierBySig, nil
+}
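+
+// paeExample is a sketch of the DSSE pre-authentication encoding (per the DSSE
+// v1 spec; not called by this package): the signatures checked above cover
+// PAE(payloadType, payload), never the raw payload bytes.
+func paeExample(payloadType string, payload []byte) []byte {
+ // PAE = "DSSEv1" SP LEN(type) SP type SP LEN(payload) SP payload
+ return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s", len(payloadType), payloadType, len(payload), payload))
+}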
+
+func (v V002Entry) Verifiers() ([]pkitypes.PublicKey, error) {
+ if v.IntotoObj.Content == nil || v.IntotoObj.Content.Envelope == nil {
+ return nil, errors.New("intoto v0.0.2 entry not initialized")
+ }
+
+ sigs := v.IntotoObj.Content.Envelope.Signatures
+ if len(sigs) == 0 {
+ return nil, errors.New("no signatures found on intoto entry")
+ }
+
+ var keys []pkitypes.PublicKey
+ for _, s := range sigs {
+ key, err := x509.NewPublicKey(bytes.NewReader(*s.PublicKey))
+ if err != nil {
+ return nil, err
+ }
+ keys = append(keys, key)
+ }
+ return keys, nil
+}
+
+func (v V002Entry) ArtifactHash() (string, error) {
+ if v.IntotoObj.Content == nil || v.IntotoObj.Content.PayloadHash == nil || v.IntotoObj.Content.PayloadHash.Algorithm == nil || v.IntotoObj.Content.PayloadHash.Value == nil {
+ return "", errors.New("intoto v0.0.2 entry not initialized")
+ }
+ return strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value)), nil
+}
+
+func (v V002Entry) Insertable() (bool, error) {
+ if v.IntotoObj.Content == nil {
+ return false, errors.New("missing content property")
+ }
+ if v.IntotoObj.Content.Envelope == nil {
+ return false, errors.New("missing envelope property")
+ }
+ if len(v.IntotoObj.Content.Envelope.Payload) == 0 {
+ return false, errors.New("missing envelope content")
+ }
+
+ if v.IntotoObj.Content.Envelope.PayloadType == nil || len(*v.IntotoObj.Content.Envelope.PayloadType) == 0 {
+ return false, errors.New("missing payloadType content")
+ }
+
+ if len(v.IntotoObj.Content.Envelope.Signatures) == 0 {
+ return false, errors.New("missing signatures content")
+ }
+ for _, sig := range v.IntotoObj.Content.Envelope.Signatures {
+ if sig == nil {
+ return false, errors.New("missing signature entry")
+ }
+ if sig.Sig == nil || len(*sig.Sig) == 0 {
+ return false, errors.New("missing signature content")
+ }
+ if sig.PublicKey == nil || len(*sig.PublicKey) == 0 {
+ return false, errors.New("missing publicKey content")
+ }
+ }
+
+ if v.env.Payload == "" || v.env.PayloadType == "" || len(v.env.Signatures) == 0 {
+ return false, errors.New("invalid DSSE envelope")
+ }
+
+ return true, nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/intoto_v0_0_2_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/intoto_v0_0_2_schema.json
new file mode 100644
index 00000000000..3c404a30a02
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.2/intoto_v0_0_2_schema.json
@@ -0,0 +1,105 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://rekor.sigstore.dev/types/intoto/intoto_v0_0_2_schema.json",
+ "title": "intoto v0.0.2 Schema",
+ "description": "Schema for intoto object",
+ "type": "object",
+ "properties": {
+ "content": {
+ "type": "object",
+ "properties": {
+ "envelope": {
+ "description": "dsse envelope",
+ "type": "object",
+ "properties": {
+ "payload": {
+ "description": "payload of the envelope",
+ "type": "string",
+ "format": "byte",
+ "writeOnly": true
+ },
+ "payloadType": {
+ "description": "type describing the payload",
+ "type": "string"
+ },
+ "signatures": {
+ "description": "collection of all signatures of the envelope's payload",
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "description": "a signature of the envelope's payload along with the public key for the signature",
+ "type": "object",
+ "properties": {
+ "keyid": {
+ "description": "optional id of the key used to create the signature",
+ "type": "string"
+ },
+ "sig": {
+ "description": "signature of the payload",
+ "type": "string",
+ "format": "byte"
+ },
+ "publicKey": {
+ "description": "public key that corresponds to this signature",
+ "type": "string",
+ "format": "byte"
+ }
+ },
+ "required": ["sig", "publicKey"]
+ }
+ }
+ },
+ "required": ["payloadType", "signatures"]
+ },
+ "hash": {
+ "description": "Specifies the hash algorithm and value encompassing the entire signed envelope",
+ "type": "object",
+ "properties": {
+ "algorithm": {
+ "description": "The hashing function used to compute the hash value",
+ "type": "string",
+ "enum": [
+ "sha256"
+ ]
+ },
+ "value": {
+ "description": "The hash value for the archive",
+ "type": "string"
+ }
+ },
+ "required": [
+ "algorithm",
+ "value"
+ ],
+ "readOnly": true
+ },
+ "payloadHash": {
+ "description": "Specifies the hash algorithm and value covering the payload within the DSSE envelope",
+ "type": "object",
+ "properties": {
+ "algorithm": {
+ "description": "The hashing function used to compute the hash value",
+ "type": "string",
+ "enum": [ "sha256" ]
+ },
+ "value": {
+ "description": "The hash value of the payload",
+ "type": "string"
+ }
+ },
+ "required": [
+ "algorithm",
+ "value"
+ ],
+ "readOnly": true
+ }
+ },
+ "required": [
+ "envelope"
+ ]
+ }
+ },
+ "required": [
+ "content"
+ ]
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/test_util.go b/vendor/github.com/sigstore/rekor/pkg/types/test_util.go
new file mode 100644
index 00000000000..b4daa3556e7
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/test_util.go
@@ -0,0 +1,94 @@
+/*
+Copyright © 2021 The Sigstore Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+ pkitypes "github.com/sigstore/rekor/pkg/pki/pkitypes"
+)
+
+type BaseUnmarshalTester struct{}
+
+func (u BaseUnmarshalTester) NewEntry() EntryImpl {
+ return &BaseUnmarshalTester{}
+}
+
+func (u BaseUnmarshalTester) ArtifactHash() (string, error) {
+ return "", nil
+}
+
+func (u BaseUnmarshalTester) Verifiers() ([]pkitypes.PublicKey, error) {
+ return nil, nil
+}
+
+func (u BaseUnmarshalTester) APIVersion() string {
+ return "2.0.1"
+}
+
+func (u BaseUnmarshalTester) IndexKeys() ([]string, error) {
+ return []string{}, nil
+}
+
+func (u BaseUnmarshalTester) Canonicalize(_ context.Context) ([]byte, error) {
+ return nil, nil
+}
+
+func (u BaseUnmarshalTester) Unmarshal(_ models.ProposedEntry) error {
+ return nil
+}
+
+func (u BaseUnmarshalTester) Validate() error {
+ return nil
+}
+
+func (u BaseUnmarshalTester) AttestationKey() string {
+ return ""
+}
+
+func (u BaseUnmarshalTester) AttestationKeyValue() (string, []byte) {
+ return "", nil
+}
+
+func (u BaseUnmarshalTester) CreateFromArtifactProperties(_ context.Context, _ ArtifactProperties) (models.ProposedEntry, error) {
+ return nil, nil
+}
+
+func (u BaseUnmarshalTester) Insertable() (bool, error) {
+ return false, nil
+}
+
+type BaseProposedEntryTester struct{}
+
+func (b BaseProposedEntryTester) Kind() string {
+ return "nil"
+}
+
+func (b BaseProposedEntryTester) SetKind(_ string) {
+
+}
+
+func (b BaseProposedEntryTester) Validate(_ strfmt.Registry) error {
+ return nil
+}
+
+func (b BaseProposedEntryTester) ContextValidate(_ context.Context, _ strfmt.Registry) error {
+ return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/types.go b/vendor/github.com/sigstore/rekor/pkg/types/types.go
new file mode 100644
index 00000000000..72722321b18
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/types.go
@@ -0,0 +1,88 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "slices"
+ "sync"
+
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// TypeMap stores the mapping between type strings and entry constructors.
+// Entries are written once at process initialization and read for each
+// transaction, so we use a sync.Map, which is optimized for this access pattern.
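+//
+// Each type implementation registers itself from an init() func by storing a
+// func() TypeImpl constructor under its kind string, e.g. (hypothetical kind
+// string shown):
+//
+//	TypeMap.Store("hashedrekord", New)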
+var TypeMap sync.Map
+
+// RekorType is the base struct that is embedded in all type implementations
+type RekorType struct {
+ Kind string // this is the unique string that identifies the type
+ VersionMap VersionEntryFactoryMap // this maps the supported versions to implementation
+}
+
+// TypeImpl is implemented by all types to support the polymorphic conversion of abstract
+// proposed entry to a working implementation for the versioned type requested, if supported
+type TypeImpl interface {
+ CreateProposedEntry(context.Context, string, ArtifactProperties) (models.ProposedEntry, error)
+ DefaultVersion() string
+ SupportedVersions() []string
+ IsSupportedVersion(string) bool
+ UnmarshalEntry(pe models.ProposedEntry) (EntryImpl, error)
+}
+
+// VersionedUnmarshal extracts the correct implementing factory function from the type's version map,
+// creates an entry of that versioned type and then calls that versioned type's unmarshal method
+func (rt *RekorType) VersionedUnmarshal(pe models.ProposedEntry, version string) (EntryImpl, error) {
+ ef, err := rt.VersionMap.GetEntryFactory(version)
+ if err != nil {
+ return nil, fmt.Errorf("%s implementation for version '%v' not found: %w", rt.Kind, version, err)
+ }
+ entry := ef()
+ if entry == nil {
+ return nil, errors.New("failure generating object")
+ }
+ if pe == nil {
+ return entry, nil
+ }
+ return entry, entry.Unmarshal(pe)
+}
+
+// SupportedVersions returns a list of versions of this type that can be currently entered into the log
+func (rt *RekorType) SupportedVersions() []string {
+ return rt.VersionMap.SupportedVersions()
+}
+
+// IsSupportedVersion returns true if the version can be inserted into the log, and false if not
+func (rt *RekorType) IsSupportedVersion(proposedVersion string) bool {
+ return slices.Contains(rt.SupportedVersions(), proposedVersion)
+}
+
+// ListImplementedTypes returns a list of all type strings currently known to
+// be implemented
+func ListImplementedTypes() []string {
+ retVal := []string{}
+ TypeMap.Range(func(k interface{}, v interface{}) bool {
+ tf := v.(func() TypeImpl)
+ for _, verStr := range tf().SupportedVersions() {
+ retVal = append(retVal, fmt.Sprintf("%v:%v", k.(string), verStr))
+ }
+ return true
+ })
+ return retVal
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/versionmap.go b/vendor/github.com/sigstore/rekor/pkg/types/versionmap.go
new file mode 100644
index 00000000000..d2716370994
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/types/versionmap.go
@@ -0,0 +1,97 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/blang/semver"
+ "github.com/sigstore/rekor/pkg/internal/log"
+)
+
+// VersionEntryFactoryMap defines a map-like interface to find the correct implementation for a version string.
+// This could be a simple map[string]EntryFactory, or something more elegant (e.g. semver)
+type VersionEntryFactoryMap interface {
+ GetEntryFactory(string) (EntryFactory, error) // return the entry factory for the specified version
+ SetEntryFactory(string, EntryFactory) error // set the entry factory for the specified version
+ Count() int // return the count of entry factories currently in the map
+ SupportedVersions() []string // return a list of versions currently stored in the map
+}
+
+// SemVerEntryFactoryMap implements a map that allows implementations to specify their supported versions using
+// semver-compliant strings
+type SemVerEntryFactoryMap struct {
+ factoryMap map[string]EntryFactory
+
+ sync.RWMutex
+}
+
+func NewSemVerEntryFactoryMap() VersionEntryFactoryMap {
+ s := SemVerEntryFactoryMap{}
+ s.factoryMap = make(map[string]EntryFactory)
+ return &s
+}
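+
+// Example usage, a sketch with a hypothetical semver range constraint and a
+// hypothetical factory function NewEntry:
+//
+//	m := NewSemVerEntryFactoryMap()
+//	_ = m.SetEntryFactory(">=0.0.1 <0.0.2", NewEntry)
+//	ef, _ := m.GetEntryFactory("0.0.1")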
+
+func (s *SemVerEntryFactoryMap) Count() int {
+ s.RLock()
+ defer s.RUnlock()
+ return len(s.factoryMap)
+}
+
+func (s *SemVerEntryFactoryMap) GetEntryFactory(version string) (EntryFactory, error) {
+ s.RLock()
+ defer s.RUnlock()
+
+ semverToMatch, err := semver.Parse(version)
+ if err != nil {
+ log.Logger.Error(err)
+ return nil, err
+ }
+
+ // will return first function that matches
+ for k, v := range s.factoryMap {
+ semverRange, err := semver.ParseRange(k)
+ if err != nil {
+ log.Logger.Error(err)
+ return nil, err
+ }
+
+ if semverRange(semverToMatch) {
+ return v, nil
+ }
+ }
+ return nil, fmt.Errorf("unable to locate entry for version %s", version)
+}
+
+func (s *SemVerEntryFactoryMap) SetEntryFactory(constraint string, ef EntryFactory) error {
+ s.Lock()
+ defer s.Unlock()
+
+ if _, err := semver.ParseRange(constraint); err != nil {
+ log.Logger.Error(err)
+ return err
+ }
+
+ s.factoryMap[constraint] = ef
+ return nil
+}
+
+func (s *SemVerEntryFactoryMap) SupportedVersions() []string {
+ var versions []string
+ for k := range s.factoryMap {
+ versions = append(versions, k)
+ }
+ return versions
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go
new file mode 100644
index 00000000000..27b246d7997
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go
@@ -0,0 +1,165 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// heavily borrowed from https://github.com/transparency-dev/formats/blob/main/log/checkpoint.go
+
+type Checkpoint struct {
+ // Origin is the unique identifier/version string
+ Origin string
+ // Size is the number of entries in the log at this checkpoint.
+ Size uint64
+ // Hash is the hash which commits to the contents of the entire log.
+ Hash []byte
+ // OtherContent is any additional data to be included in the signed payload; each element is assumed to be one line
+ OtherContent []string
+}
+
+// String returns the String representation of the Checkpoint
+func (c Checkpoint) String() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "%s\n%d\n%s\n", c.Origin, c.Size, base64.StdEncoding.EncodeToString(c.Hash))
+ for _, line := range c.OtherContent {
+ fmt.Fprintf(&b, "%s\n", line)
+ }
+ return b.String()
+}
+
+// MarshalCheckpoint returns the common format representation of this Checkpoint.
+func (c Checkpoint) MarshalCheckpoint() ([]byte, error) {
+ return []byte(c.String()), nil
+}
+
+// UnmarshalCheckpoint parses the common formatted checkpoint data and stores the result
+// in the Checkpoint.
+//
+// The supplied data is expected to begin with the following 3 lines of text,
+// each followed by a newline:
+//
+//	<origin string>
+//	<decimal representation of log size>
+//	<base64 representation of root hash>
+//
+// It may be followed by optional non-empty lines of other content, one per line.
+//
+// This will discard any content found after the checkpoint (including signatures)
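+//
+// For example, a checkpoint body might look like this (illustrative values only):
+//
+//	rekor.sigstore.dev - 1193050959916656506
+//	13836995
+//	uSfeGwIB4wkCV9uOSTE4OtOOHNu7v2HXSXB2QvdVCpo=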
+func (c *Checkpoint) UnmarshalCheckpoint(data []byte) error {
+ l := bytes.Split(data, []byte("\n"))
+ if len(l) < 4 {
+ return errors.New("invalid checkpoint - too few newlines")
+ }
+ origin := string(l[0])
+ if len(origin) == 0 {
+ return errors.New("invalid checkpoint - empty ecosystem")
+ }
+ size, err := strconv.ParseUint(string(l[1]), 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid checkpoint - size invalid: %w", err)
+ }
+ h, err := base64.StdEncoding.DecodeString(string(l[2]))
+ if err != nil {
+ return fmt.Errorf("invalid checkpoint - invalid hash: %w", err)
+ }
+ *c = Checkpoint{
+ Origin: origin,
+ Size: size,
+ Hash: h,
+ }
+ // len(l) >= 4 is guaranteed above, so any optional content starts at index 3
+ for _, line := range l[3:] {
+ if len(line) == 0 {
+ break
+ }
+ c.OtherContent = append(c.OtherContent, string(line))
+ }
+ return nil
+}
+
+type SignedCheckpoint struct {
+ Checkpoint
+ SignedNote
+}
+
+func CreateSignedCheckpoint(c Checkpoint) (*SignedCheckpoint, error) {
+ text, err := c.MarshalCheckpoint()
+ if err != nil {
+ return nil, err
+ }
+ return &SignedCheckpoint{
+ Checkpoint: c,
+ SignedNote: SignedNote{Note: string(text)},
+ }, nil
+}
+
+func SignedCheckpointValidator(strToValidate string) bool {
+ s := SignedNote{}
+ if err := s.UnmarshalText([]byte(strToValidate)); err != nil {
+ return false
+ }
+ c := &Checkpoint{}
+ return c.UnmarshalCheckpoint([]byte(s.Note)) == nil
+}
+
+func CheckpointValidator(strToValidate string) bool {
+ c := &Checkpoint{}
+ return c.UnmarshalCheckpoint([]byte(strToValidate)) == nil
+}
+
+func (r *SignedCheckpoint) UnmarshalText(data []byte) error {
+ s := SignedNote{}
+ if err := s.UnmarshalText([]byte(data)); err != nil {
+ return fmt.Errorf("unmarshalling signed note: %w", err)
+ }
+ c := Checkpoint{}
+ if err := c.UnmarshalCheckpoint([]byte(s.Note)); err != nil {
+ return fmt.Errorf("unmarshalling checkpoint: %w", err)
+ }
+ *r = SignedCheckpoint{Checkpoint: c, SignedNote: s}
+ return nil
+}
+
+// CreateAndSignCheckpoint creates a signed checkpoint as a commitment to the current root hash
+func CreateAndSignCheckpoint(ctx context.Context, hostname string, treeID int64, treeSize uint64, rootHash []byte, signer signature.Signer) ([]byte, error) {
+ sth, err := CreateSignedCheckpoint(Checkpoint{
+ Origin: fmt.Sprintf("%s - %d", hostname, treeID),
+ Size: treeSize,
+ Hash: rootHash,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error creating checkpoint: %w", err)
+ }
+ if _, err := sth.Sign(hostname, signer, options.WithContext(ctx)); err != nil {
+ return nil, fmt.Errorf("error signing checkpoint: %w", err)
+ }
+ scBytes, err := sth.MarshalText()
+ if err != nil {
+ return nil, fmt.Errorf("error marshalling checkpoint: %w", err)
+ }
+ return scBytes, nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/fetch.go b/vendor/github.com/sigstore/rekor/pkg/util/fetch.go
new file mode 100644
index 00000000000..7f8e93fb046
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/util/fetch.go
@@ -0,0 +1,49 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+// FileOrURLReadCloser returns a ReadCloser over the artifact fetched from url (if non-empty), or over the supplied content otherwise. Note: the caller is responsible for closing the returned ReadCloser!
+func FileOrURLReadCloser(ctx context.Context, url string, content []byte) (io.ReadCloser, error) {
+ var dataReader io.ReadCloser
+ if url != "" {
+ //TODO: set timeout here, SSL settings?
+ client := &http.Client{}
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode < 200 || resp.StatusCode > 299 {
+ return nil, fmt.Errorf("error received while fetching artifact '%v': %v", url, resp.Status)
+ }
+
+ dataReader = resp.Body
+ } else {
+ dataReader = io.NopCloser(bytes.NewReader(content))
+ }
+ return dataReader, nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/sha.go b/vendor/github.com/sigstore/rekor/pkg/util/sha.go
new file mode 100644
index 00000000000..07b8fb1b53d
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/util/sha.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "crypto"
+ "fmt"
+ "strings"
+)
+
+// PrefixSHA prepends the algorithm prefix (e.g. "sha256:") to an unprefixed hash, inferring the algorithm from the length of the hex digest.
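+//
+// For example, PrefixSHA("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
+// returns the digest prefixed with "sha256:" (the example input is the SHA-256
+// of the empty string); inputs that already contain a ':' are returned unchanged.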
+func PrefixSHA(sha string) string {
+ var prefix string
+ var components = strings.Split(sha, ":")
+
+ if len(components) == 2 {
+ return sha
+ }
+
+ switch len(sha) {
+ case 40:
+ prefix = "sha1:"
+ case 64:
+ prefix = "sha256:"
+ case 96:
+ prefix = "sha384:"
+ case 128:
+ prefix = "sha512:"
+ }
+
+ return fmt.Sprintf("%v%v", prefix, sha)
+}
+
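+// UnprefixSHA splits a possibly-prefixed hash into its crypto.Hash algorithm
+// and the bare hex digest, inferring the algorithm from the digest length when
+// no prefix is present. It returns crypto.Hash(0) and "" for unrecognized input.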
+func UnprefixSHA(sha string) (crypto.Hash, string) {
+ components := strings.Split(sha, ":")
+
+ if len(components) == 2 {
+ prefix := components[0]
+ sha = components[1]
+
+ switch prefix {
+ case "sha1":
+ return crypto.SHA1, sha
+ case "sha256":
+ return crypto.SHA256, sha
+ case "sha384":
+ return crypto.SHA384, sha
+ case "sha512":
+ return crypto.SHA512, sha
+ default:
+ return crypto.Hash(0), ""
+ }
+ }
+
+ switch len(sha) {
+ case 40:
+ return crypto.SHA1, sha
+ case 64:
+ return crypto.SHA256, sha
+ case 96:
+ return crypto.SHA384, sha
+ case 128:
+ return crypto.SHA512, sha
+ }
+
+ return crypto.Hash(0), ""
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go
new file mode 100644
index 00000000000..4c9c8f8a70f
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go
@@ -0,0 +1,211 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "bufio"
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/sigstore/sigstore/pkg/signature/options"
+ "golang.org/x/mod/sumdb/note"
+)
+
+type SignedNote struct {
+ // Textual representation of a note to sign.
+ Note string
+ // Signatures are one or more signature lines covering the payload
+ Signatures []note.Signature
+}
+
+// Sign adds a signature to a SignedNote object
+// The signature is added to the signature array as well as being directly returned to the caller
+func (s *SignedNote) Sign(identity string, signer signature.Signer, opts signature.SignOption) (*note.Signature, error) {
+ sig, err := signer.SignMessage(bytes.NewReader([]byte(s.Note)), opts)
+ if err != nil {
+ return nil, fmt.Errorf("signing note: %w", err)
+ }
+
+ pk, err := signer.PublicKey()
+ if err != nil {
+ return nil, fmt.Errorf("retrieving public key: %w", err)
+ }
+ pkHash, err := getPublicKeyHash(pk)
+ if err != nil {
+ return nil, err
+ }
+
+ noteSig := note.Signature{
+ Name: identity,
+ Hash: pkHash,
+ Base64: base64.StdEncoding.EncodeToString(sig),
+ }
+
+ s.Signatures = append(s.Signatures, noteSig)
+ return &noteSig, nil
+}
+
+// Verify checks that every signature on the note can be successfully verified
+// using the supplied public key; it returns false if there are no signatures
+func (s SignedNote) Verify(verifier signature.Verifier) bool {
+ if len(s.Signatures) == 0 {
+ return false
+ }
+
+ msg := []byte(s.Note)
+ digest := sha256.Sum256(msg)
+
+ pk, err := verifier.PublicKey()
+ if err != nil {
+ return false
+ }
+ verifierPkHash, err := getPublicKeyHash(pk)
+ if err != nil {
+ return false
+ }
+
+ for _, sig := range s.Signatures {
+ sigBytes, err := base64.StdEncoding.DecodeString(sig.Base64)
+ if err != nil {
+ return false
+ }
+
+ if sig.Hash != verifierPkHash {
+ return false
+ }
+
+ opts := []signature.VerifyOption{}
+ switch pk.(type) {
+ case *rsa.PublicKey, *ecdsa.PublicKey:
+ opts = append(opts, options.WithDigest(digest[:]))
+ case ed25519.PublicKey:
+ // ed25519 signatures are verified over the raw message; no digest option needed
+ default:
+ return false
+ }
+ if err := verifier.VerifySignature(bytes.NewReader(sigBytes), bytes.NewReader(msg), opts...); err != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// MarshalText returns the common format representation of this SignedNote.
+func (s SignedNote) MarshalText() ([]byte, error) {
+ return []byte(s.String()), nil
+}
+
+// String returns the String representation of the SignedNote
+func (s SignedNote) String() string {
+ var b strings.Builder
+ b.WriteString(s.Note)
+ b.WriteRune('\n')
+ for _, sig := range s.Signatures {
+ var hbuf [4]byte
+ binary.BigEndian.PutUint32(hbuf[:], sig.Hash)
+ sigBytes, _ := base64.StdEncoding.DecodeString(sig.Base64)
+ b64 := base64.StdEncoding.EncodeToString(append(hbuf[:], sigBytes...))
+ fmt.Fprintf(&b, "%c %s %s\n", '\u2014', sig.Name, b64)
+ }
+
+ return b.String()
+}
+
+// UnmarshalText parses the common formatted signed note data and stores the result
+// in the SignedNote. THIS DOES NOT VERIFY SIGNATURES INSIDE THE CONTENT!
+//
+// The supplied data is expected to contain a single Note, followed by a single
+// blank line, followed by one or more signature lines with the following format:
+//
+// \u2014 name signature
+//
+// - name is the string associated with the signer
+// - signature is a base64 encoded string; the first 4 bytes of the decoded value is a
+// hint to the public key; it is a big-endian encoded uint32 representing the first
+// 4 bytes of the SHA256 hash of the public key
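+//
+// For example (illustrative values only; signature truncated):
+//
+//	rekor.sigstore.dev - 1193050959916656506
+//	13836995
+//	uSfeGwIB4wkCV9uOSTE4OtOOHNu7v2HXSXB2QvdVCpo=
+//
+//	— rekor.sigstore.dev wNI9ajBEAiB...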
+func (s *SignedNote) UnmarshalText(data []byte) error {
+ sigSplit := []byte("\n\n")
+ // Must end with signature block preceded by blank line.
+ split := bytes.LastIndex(data, sigSplit)
+ if split < 0 {
+ return errors.New("malformed note")
+ }
+ text, data := data[:split+1], data[split+2:]
+ if len(data) == 0 || data[len(data)-1] != '\n' {
+ return errors.New("malformed note")
+ }
+
+ sn := SignedNote{
+ Note: string(text),
+ }
+
+ b := bufio.NewScanner(bytes.NewReader(data))
+ for b.Scan() {
+ var name, signature string
+ if _, err := fmt.Fscanf(strings.NewReader(b.Text()), "\u2014 %s %s\n", &name, &signature); err != nil {
+ return fmt.Errorf("parsing signature: %w", err)
+ }
+
+ sigBytes, err := base64.StdEncoding.DecodeString(signature)
+ if err != nil {
+ return fmt.Errorf("decoding signature: %w", err)
+ }
+ if len(sigBytes) < 5 {
+ return errors.New("signature is too small")
+ }
+
+ sig := note.Signature{
+ Name: name,
+ Hash: binary.BigEndian.Uint32(sigBytes[0:4]),
+ Base64: base64.StdEncoding.EncodeToString(sigBytes[4:]),
+ }
+ sn.Signatures = append(sn.Signatures, sig)
+
+ }
+ if len(sn.Signatures) == 0 {
+ return errors.New("no signatures found in input")
+ }
+
+ // copy sc to s
+ *s = sn
+ return nil
+}
+
+func SignedNoteValidator(strToValidate string) bool {
+ s := SignedNote{}
+ return s.UnmarshalText([]byte(strToValidate)) == nil
+}
+
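+// getPublicKeyHash returns the key hint used in note signatures: the first
+// four bytes (as a big-endian uint32) of the SHA-256 hash of the PKIX-encoded
+// public key.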
+func getPublicKeyHash(publicKey crypto.PublicKey) (uint32, error) {
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey)
+ if err != nil {
+ return 0, fmt.Errorf("marshalling public key: %w", err)
+ }
+ pkSha := sha256.Sum256(pubKeyBytes)
+ hash := binary.BigEndian.Uint32(pkSha[:])
+ return hash, nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/verify/verify.go b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go
new file mode 100644
index 00000000000..61846923b79
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/verify/verify.go
@@ -0,0 +1,234 @@
+//
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
+ "github.com/sigstore/rekor/pkg/generated/client"
+ "github.com/sigstore/rekor/pkg/generated/client/tlog"
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sigstore/rekor/pkg/util"
+ "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/sigstore/sigstore/pkg/signature/options"
+ "github.com/transparency-dev/merkle/proof"
+ "github.com/transparency-dev/merkle/rfc6962"
+)
+
+// ProveConsistency verifies consistency between an initial, trusted STH
+// and a second new STH. Callers MUST verify the signatures on both STHs.
+func ProveConsistency(ctx context.Context, rClient *client.Rekor,
+ oldSTH *util.SignedCheckpoint, newSTH *util.SignedCheckpoint, treeID string) error {
+ oldTreeSize := int64(oldSTH.Size) // nolint: gosec
+ switch {
+ case oldTreeSize == 0:
+ return errors.New("consistency proofs can not be computed starting from an empty log")
+ case oldTreeSize == int64(newSTH.Size): // nolint: gosec
+ if !bytes.Equal(oldSTH.Hash, newSTH.Hash) {
+ return errors.New("old root hash does not match STH hash")
+ }
+ case oldTreeSize < int64(newSTH.Size): // nolint: gosec
+ consistencyParams := tlog.NewGetLogProofParamsWithContext(ctx)
+ consistencyParams.FirstSize = &oldTreeSize // Root size at the old, or trusted state.
+ consistencyParams.LastSize = int64(newSTH.Size) // nolint: gosec // Root size at the new state to verify against.
+ consistencyParams.TreeID = &treeID
+ consistencyProof, err := rClient.Tlog.GetLogProof(consistencyParams)
+ if err != nil {
+ return err
+ }
+ var hashes [][]byte
+ for _, h := range consistencyProof.Payload.Hashes {
+ b, err := hex.DecodeString(h)
+ if err != nil {
+ return errors.New("error decoding consistency proof hashes")
+ }
+ hashes = append(hashes, b)
+ }
+ if err := proof.VerifyConsistency(rfc6962.DefaultHasher,
+ oldSTH.Size, newSTH.Size, hashes, oldSTH.Hash, newSTH.Hash); err != nil {
+ return err
+ }
+ case oldTreeSize > int64(newSTH.Size): // nolint: gosec
+ return errors.New("inclusion proof returned a tree size larger than the verified tree size")
+ }
+ return nil
+
+}
+
+// VerifyCurrentCheckpoint verifies the provided checkpoint by verifying consistency
+// against a newly fetched Checkpoint.
+// nolint
+func VerifyCurrentCheckpoint(ctx context.Context, rClient *client.Rekor, verifier signature.Verifier,
+ oldSTH *util.SignedCheckpoint) (*util.SignedCheckpoint, error) {
+ // The oldSTH should already be verified, but check for robustness.
+ if !oldSTH.Verify(verifier) {
+ return nil, errors.New("signature on old tree head did not verify")
+ }
+
+ // Get and verify against the current STH.
+ infoParams := tlog.NewGetLogInfoParamsWithContext(ctx)
+ result, err := rClient.Tlog.GetLogInfo(infoParams)
+ if err != nil {
+ return nil, err
+ }
+
+ logInfo := result.GetPayload()
+ sth := util.SignedCheckpoint{}
+ if err := sth.UnmarshalText([]byte(*logInfo.SignedTreeHead)); err != nil {
+ return nil, err
+ }
+
+ // Verify the signature on the SignedCheckpoint.
+ if !sth.Verify(verifier) {
+ return nil, errors.New("signature on tree head did not verify")
+ }
+
+ // Now verify consistency up to the STH.
+ if err := ProveConsistency(ctx, rClient, oldSTH, &sth, *logInfo.TreeID); err != nil {
+ return nil, err
+ }
+ return &sth, nil
+}
+
+// VerifyCheckpointSignature verifies the signature on a checkpoint (signed tree head). It does
+// not verify consistency against other checkpoints.
+// nolint
+func VerifyCheckpointSignature(e *models.LogEntryAnon, verifier signature.Verifier) error {
+ sth := &util.SignedCheckpoint{}
+ if err := sth.UnmarshalText([]byte(*e.Verification.InclusionProof.Checkpoint)); err != nil {
+ return fmt.Errorf("unmarshalling log entry checkpoint to SignedCheckpoint: %w", err)
+ }
+ if !sth.Verify(verifier) {
+ return errors.New("signature on checkpoint did not verify")
+ }
+ rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash)
+ if err != nil {
+ return errors.New("decoding inclusion proof root has")
+ }
+
+ if !bytes.EqualFold(rootHash, sth.Hash) {
+ return fmt.Errorf("proof root hash does not match signed tree head, expected %s got %s",
+ *e.Verification.InclusionProof.RootHash,
+ hex.EncodeToString(sth.Hash))
+ }
+ return nil
+}
+
+// VerifyInclusion verifies an entry's inclusion proof. Clients MUST either verify
+// the root hash against a new STH (via VerifyCurrentCheckpoint) or against a
+// trusted, existing STH (via ProveConsistency).
+// nolint
+func VerifyInclusion(ctx context.Context, e *models.LogEntryAnon) error {
+ if e.Verification == nil || e.Verification.InclusionProof == nil {
+ return errors.New("inclusion proof not provided")
+ }
+
+ hashes := [][]byte{}
+ for _, h := range e.Verification.InclusionProof.Hashes {
+ hb, err := hex.DecodeString(h)
+ if err != nil {
+ return errors.New("error decoding inclusion proof hashes")
+ }
+ hashes = append(hashes, hb)
+ }
+
+ rootHash, err := hex.DecodeString(*e.Verification.InclusionProof.RootHash)
+ if err != nil {
+ return err
+ }
+
+ // Verify the inclusion proof.
+ entryBytes, err := base64.StdEncoding.DecodeString(e.Body.(string))
+ if err != nil {
+ return err
+ }
+ leafHash := rfc6962.DefaultHasher.HashLeaf(entryBytes)
+
+ if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(*e.Verification.InclusionProof.LogIndex),
+ uint64(*e.Verification.InclusionProof.TreeSize), leafHash, hashes, rootHash); err != nil { // nolint: gosec
+ return err
+ }
+
+ return nil
+}
+
+// VerifySignedEntryTimestamp verifies the entry's SET against the provided
+// public key.
+// nolint
+func VerifySignedEntryTimestamp(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error {
+ if e.Verification == nil {
+ return errors.New("missing verification")
+ }
+ if e.Verification.SignedEntryTimestamp == nil {
+ return errors.New("signature missing")
+ }
+
+ type bundle struct {
+ Body interface{} `json:"body"`
+ IntegratedTime int64 `json:"integratedTime"`
+ // Note that this is the virtual index.
+ LogIndex int64 `json:"logIndex"`
+ LogID string `json:"logID"`
+ }
+ bundlePayload := bundle{
+ Body: e.Body,
+ IntegratedTime: *e.IntegratedTime,
+ LogIndex: *e.LogIndex,
+ LogID: *e.LogID,
+ }
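+ // The SET is verified over the JSON Canonicalization Scheme (RFC 8785) form
+ // of this payload, which is what Rekor signs at integration time.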
+ contents, err := json.Marshal(bundlePayload)
+ if err != nil {
+ return fmt.Errorf("marshaling bundle: %w", err)
+ }
+ canonicalized, err := jsoncanonicalizer.Transform(contents)
+ if err != nil {
+ return fmt.Errorf("canonicalizing bundle: %w", err)
+ }
+
+ // verify the SET against the public key
+ if err := verifier.VerifySignature(bytes.NewReader(e.Verification.SignedEntryTimestamp),
+ bytes.NewReader(canonicalized), options.WithContext(ctx)); err != nil {
+ return fmt.Errorf("unable to verify bundle: %w", err)
+ }
+ return nil
+}
+
+// VerifyLogEntry performs verification of a LogEntry given a Rekor verifier.
+// Performs inclusion proof verification up to a verified root hash,
+// SignedEntryTimestamp verification, and checkpoint verification.
+// nolint
+func VerifyLogEntry(ctx context.Context, e *models.LogEntryAnon, verifier signature.Verifier) error {
+ // Verify the inclusion proof using the body's leaf hash.
+ if err := VerifyInclusion(ctx, e); err != nil {
+ return err
+ }
+
+ // Verify checkpoint, which includes a signed root hash.
+ if err := VerifyCheckpointSignature(e, verifier); err != nil {
+ return err
+ }
+
+ // Verify the Signed Entry Timestamp.
+ if err := VerifySignedEntryTimestamp(ctx, e, verifier); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt
new file mode 100644
index 00000000000..406ee260604
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/COPYRIGHT.txt
@@ -0,0 +1,13 @@
+Copyright 2023 The Sigstore Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/sigstore/sigstore-go/LICENSE b/vendor/github.com/sigstore/sigstore-go/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go
new file mode 100644
index 00000000000..5919230e8a3
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/bundle.go
@@ -0,0 +1,408 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bundle
+
+import (
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+ protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1"
+ protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ protodsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse"
+ "golang.org/x/mod/semver"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ "github.com/sigstore/sigstore-go/pkg/tlog"
+ "github.com/sigstore/sigstore-go/pkg/verify"
+)
+
+var ErrValidation = errors.New("validation error")
+var ErrUnsupportedMediaType = fmt.Errorf("%w: unsupported media type", ErrValidation)
+var ErrEmptyBundle = fmt.Errorf("%w: empty protobuf bundle", ErrValidation)
+var ErrMissingVerificationMaterial = fmt.Errorf("%w: missing verification material", ErrValidation)
+var ErrMissingBundleContent = fmt.Errorf("%w: missing bundle content", ErrValidation)
+var ErrUnimplemented = errors.New("unimplemented")
+var ErrInvalidAttestation = fmt.Errorf("%w: invalid attestation", ErrValidation)
+var ErrMissingEnvelope = fmt.Errorf("%w: missing valid envelope", ErrInvalidAttestation)
+var ErrDecodingJSON = fmt.Errorf("%w: decoding json", ErrInvalidAttestation)
+var ErrDecodingB64 = fmt.Errorf("%w: decoding base64", ErrInvalidAttestation)
+
+const mediaTypeBase = "application/vnd.dev.sigstore.bundle"
+
+func ErrValidationError(err error) error {
+ return fmt.Errorf("%w: %w", ErrValidation, err)
+}
+
+type Bundle struct {
+ *protobundle.Bundle
+ hasInclusionPromise bool
+ hasInclusionProof bool
+}
+
+func NewBundle(pbundle *protobundle.Bundle) (*Bundle, error) {
+ bundle := &Bundle{
+ Bundle: pbundle,
+ hasInclusionPromise: false,
+ hasInclusionProof: false,
+ }
+
+ err := bundle.validate()
+ if err != nil {
+ return nil, err
+ }
+
+ return bundle, nil
+}
+
+// Deprecated: use Bundle instead
+type ProtobufBundle = Bundle
+
+// Deprecated: use NewBundle instead
+func NewProtobufBundle(b *protobundle.Bundle) (*ProtobufBundle, error) {
+ return NewBundle(b)
+}
+
+func (b *Bundle) validate() error {
+ bundleVersion, err := b.Version()
+ if err != nil {
+ return fmt.Errorf("error getting bundle version: %w", err)
+ }
+
+ // if bundle version is < 0.1, return error
+ if semver.Compare(bundleVersion, "v0.1") < 0 {
+ return fmt.Errorf("%w: bundle version %s is not supported", ErrUnsupportedMediaType, bundleVersion)
+ }
+
+ // fetch tlog entries, as next check needs to check them for inclusion proof/promise
+ entries, err := b.TlogEntries()
+ if err != nil {
+ return err
+ }
+
+ // if bundle version == v0.1, require inclusion promise
+ if semver.Compare(bundleVersion, "v0.1") == 0 {
+ if len(entries) > 0 && !b.hasInclusionPromise {
+ return errors.New("inclusion promises missing in bundle (required for bundle v0.1)")
+ }
+ } else {
+ // if bundle version >= v0.2, require inclusion proof
+ if len(entries) > 0 && !b.hasInclusionProof {
+ return errors.New("inclusion proof missing in bundle (required for bundle v0.2)")
+ }
+ }
+
+ // if bundle version >= v0.3, require verification material to not be X.509 certificate chain (only single certificate is allowed)
+ if semver.Compare(bundleVersion, "v0.3") >= 0 {
+ certs := b.VerificationMaterial.GetX509CertificateChain()
+
+ if certs != nil {
+ return errors.New("verification material cannot be X.509 certificate chain (for bundle v0.3)")
+ }
+ }
+
+ // if bundle version is >= v0.4, return error as this version is not supported
+ if semver.Compare(bundleVersion, "v0.4") >= 0 {
+ return fmt.Errorf("%w: bundle version %s is not yet supported", ErrUnsupportedMediaType, bundleVersion)
+ }
+
+ err = validateBundle(b.Bundle)
+ if err != nil {
+ return fmt.Errorf("invalid bundle: %w", err)
+ }
+ return nil
+}
+
+// MediaTypeString returns a mediatype string for the specified bundle version.
+// The function returns an error if the resulting string does not validate.
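+//
+// For example, MediaTypeString("0.2") returns
+// "application/vnd.dev.sigstore.bundle+json;version=0.2", while
+// MediaTypeString("0.3") returns "application/vnd.dev.sigstore.bundle.v0.3+json".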
+func MediaTypeString(version string) (string, error) {
+ if version == "" {
+ return "", fmt.Errorf("unable to build media type string, no version defined")
+ }
+
+ var mtString string
+
+ version = strings.TrimPrefix(version, "v")
+ mtString = fmt.Sprintf("%s.v%s+json", mediaTypeBase, version)
+
+ if version == "0.1" || version == "0.2" {
+ mtString = fmt.Sprintf("%s+json;version=%s", mediaTypeBase, version)
+ }
+
+ if _, err := getBundleVersion(mtString); err != nil {
+ return "", fmt.Errorf("unable to build mediatype: %w", err)
+ }
+
+ return mtString, nil
+}
+
+func (b *Bundle) Version() (string, error) {
+ return getBundleVersion(b.MediaType)
+}
+
+func getBundleVersion(mediaType string) (string, error) {
+ switch mediaType {
+ case mediaTypeBase + "+json;version=0.1":
+ return "v0.1", nil
+ case mediaTypeBase + "+json;version=0.2":
+ return "v0.2", nil
+ case mediaTypeBase + "+json;version=0.3":
+ return "v0.3", nil
+ }
+ if strings.HasPrefix(mediaType, mediaTypeBase+".v") && strings.HasSuffix(mediaType, "+json") {
+ version := strings.TrimPrefix(mediaType, mediaTypeBase+".")
+ version = strings.TrimSuffix(version, "+json")
+ if semver.IsValid(version) {
+ return version, nil
+ }
+ return "", fmt.Errorf("%w: invalid bundle version: %s", ErrUnsupportedMediaType, version)
+ }
+ return "", fmt.Errorf("%w: %s", ErrUnsupportedMediaType, mediaType)
+}
+
+func validateBundle(b *protobundle.Bundle) error {
+ if b == nil {
+ return ErrEmptyBundle
+ }
+
+ if b.Content == nil {
+ return ErrMissingBundleContent
+ }
+
+ switch b.Content.(type) {
+ case *protobundle.Bundle_DsseEnvelope, *protobundle.Bundle_MessageSignature:
+ default:
+ return fmt.Errorf("invalid bundle content: bundle content must be either a message signature or dsse envelope")
+ }
+
+ if b.VerificationMaterial == nil || b.VerificationMaterial.Content == nil {
+ return ErrMissingVerificationMaterial
+ }
+
+ switch b.VerificationMaterial.Content.(type) {
+ case *protobundle.VerificationMaterial_PublicKey, *protobundle.VerificationMaterial_Certificate, *protobundle.VerificationMaterial_X509CertificateChain:
+ default:
+ return fmt.Errorf("invalid verification material content: verification material must be one of public key, x509 certificate and x509 certificate chain")
+ }
+
+ return nil
+}
+
+func LoadJSONFromPath(path string) (*Bundle, error) {
+ var bundle Bundle
+ bundle.Bundle = new(protobundle.Bundle)
+
+ contents, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ err = bundle.UnmarshalJSON(contents)
+ if err != nil {
+ return nil, err
+ }
+
+ return &bundle, nil
+}
+
+func (b *Bundle) MarshalJSON() ([]byte, error) {
+ return protojson.Marshal(b.Bundle)
+}
+
+func (b *Bundle) UnmarshalJSON(data []byte) error {
+ b.Bundle = new(protobundle.Bundle)
+ err := protojson.Unmarshal(data, b.Bundle)
+ if err != nil {
+ return err
+ }
+
+ err = b.validate()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *Bundle) VerificationContent() (verify.VerificationContent, error) {
+ if b.VerificationMaterial == nil {
+ return nil, ErrMissingVerificationMaterial
+ }
+
+ switch content := b.VerificationMaterial.GetContent().(type) {
+ case *protobundle.VerificationMaterial_X509CertificateChain:
+ if content.X509CertificateChain == nil {
+ return nil, ErrMissingVerificationMaterial
+ }
+ certs := content.X509CertificateChain.GetCertificates()
+ if len(certs) == 0 || certs[0].RawBytes == nil {
+ return nil, ErrMissingVerificationMaterial
+ }
+ parsedCert, err := x509.ParseCertificate(certs[0].RawBytes)
+ if err != nil {
+ return nil, ErrValidationError(err)
+ }
+ cert := &Certificate{
+ certificate: parsedCert,
+ }
+ return cert, nil
+ case *protobundle.VerificationMaterial_Certificate:
+ if content.Certificate == nil || content.Certificate.RawBytes == nil {
+ return nil, ErrMissingVerificationMaterial
+ }
+ parsedCert, err := x509.ParseCertificate(content.Certificate.RawBytes)
+ if err != nil {
+ return nil, ErrValidationError(err)
+ }
+ cert := &Certificate{
+ certificate: parsedCert,
+ }
+ return cert, nil
+ case *protobundle.VerificationMaterial_PublicKey:
+ if content.PublicKey == nil {
+ return nil, ErrMissingVerificationMaterial
+ }
+ pk := &PublicKey{
+ hint: content.PublicKey.Hint,
+ }
+ return pk, nil
+
+ default:
+ return nil, ErrMissingVerificationMaterial
+ }
+}
+
+func (b *Bundle) HasInclusionPromise() bool {
+ return b.hasInclusionPromise
+}
+
+func (b *Bundle) HasInclusionProof() bool {
+ return b.hasInclusionProof
+}
+
+func (b *Bundle) TlogEntries() ([]*tlog.Entry, error) {
+ if b.VerificationMaterial == nil {
+ return nil, nil
+ }
+
+ tlogEntries := make([]*tlog.Entry, len(b.VerificationMaterial.TlogEntries))
+ var err error
+ for i, entry := range b.VerificationMaterial.TlogEntries {
+ tlogEntries[i], err = tlog.ParseTransparencyLogEntry(entry)
+ if err != nil {
+ return nil, ErrValidationError(err)
+ }
+
+ if tlogEntries[i].HasInclusionPromise() {
+ b.hasInclusionPromise = true
+ }
+ if tlogEntries[i].HasInclusionProof() {
+ b.hasInclusionProof = true
+ }
+ }
+
+ return tlogEntries, nil
+}
+
+func (b *Bundle) SignatureContent() (verify.SignatureContent, error) {
+ switch content := b.Content.(type) { //nolint:gocritic
+ case *protobundle.Bundle_DsseEnvelope:
+ envelope, err := parseEnvelope(content.DsseEnvelope)
+ if err != nil {
+ return nil, err
+ }
+ return envelope, nil
+ case *protobundle.Bundle_MessageSignature:
+ if content.MessageSignature == nil || content.MessageSignature.MessageDigest == nil {
+ return nil, ErrMissingVerificationMaterial
+ }
+ return NewMessageSignature(
+ content.MessageSignature.MessageDigest.Digest,
+ protocommon.HashAlgorithm_name[int32(content.MessageSignature.MessageDigest.Algorithm)],
+ content.MessageSignature.Signature,
+ ), nil
+ }
+ return nil, ErrMissingVerificationMaterial
+}
+
+func (b *Bundle) Envelope() (*Envelope, error) {
+ switch content := b.Content.(type) { //nolint:gocritic
+ case *protobundle.Bundle_DsseEnvelope:
+ envelope, err := parseEnvelope(content.DsseEnvelope)
+ if err != nil {
+ return nil, err
+ }
+ return envelope, nil
+ }
+ return nil, ErrMissingVerificationMaterial
+}
+
+func (b *Bundle) Timestamps() ([][]byte, error) {
+ if b.VerificationMaterial == nil {
+ return nil, ErrMissingVerificationMaterial
+ }
+
+ signedTimestamps := make([][]byte, 0)
+
+ if b.VerificationMaterial.TimestampVerificationData == nil {
+ return signedTimestamps, nil
+ }
+
+ for _, timestamp := range b.VerificationMaterial.TimestampVerificationData.Rfc3161Timestamps {
+ signedTimestamps = append(signedTimestamps, timestamp.SignedTimestamp)
+ }
+
+ return signedTimestamps, nil
+}
+
+// MinVersion returns true if the bundle version is greater than or equal to the expected version.
+func (b *Bundle) MinVersion(expectVersion string) bool {
+ version, err := b.Version()
+ if err != nil {
+ return false
+ }
+
+ if !strings.HasPrefix(expectVersion, "v") {
+ expectVersion = "v" + expectVersion
+ }
+
+ return semver.Compare(version, expectVersion) >= 0
+}
+
+func parseEnvelope(input *protodsse.Envelope) (*Envelope, error) {
+ if input == nil {
+ return nil, ErrMissingEnvelope
+ }
+ output := &dsse.Envelope{}
+ payload := input.GetPayload()
+ if payload == nil {
+ return nil, ErrMissingEnvelope
+ }
+ output.Payload = base64.StdEncoding.EncodeToString([]byte(payload))
+ output.PayloadType = string(input.GetPayloadType())
+ output.Signatures = make([]dsse.Signature, len(input.GetSignatures()))
+ for i, sig := range input.GetSignatures() {
+ if sig == nil {
+ return nil, ErrMissingEnvelope
+ }
+ output.Signatures[i].KeyID = sig.GetKeyid()
+ output.Signatures[i].Sig = base64.StdEncoding.EncodeToString(sig.GetSig())
+ }
+ return &Envelope{Envelope: output}, nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go
new file mode 100644
index 00000000000..b3e6dd69e6c
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/signature_content.go
@@ -0,0 +1,106 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bundle
+
+import (
+ "encoding/base64"
+
+ in_toto "github.com/in-toto/attestation/go/v1"
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+ "github.com/sigstore/sigstore-go/pkg/verify"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+const IntotoMediaType = "application/vnd.in-toto+json"
+
+type MessageSignature struct {
+ digest []byte
+ digestAlgorithm string
+ signature []byte
+}
+
+func (m *MessageSignature) Digest() []byte {
+ return m.digest
+}
+
+func (m *MessageSignature) DigestAlgorithm() string {
+ return m.digestAlgorithm
+}
+
+func NewMessageSignature(digest []byte, digestAlgorithm string, signature []byte) *MessageSignature {
+ return &MessageSignature{
+ digest: digest,
+ digestAlgorithm: digestAlgorithm,
+ signature: signature,
+ }
+}
+
+type Envelope struct {
+ *dsse.Envelope
+}
+
+func (e *Envelope) Statement() (*in_toto.Statement, error) {
+ if e.PayloadType != IntotoMediaType {
+ return nil, ErrUnsupportedMediaType
+ }
+
+ var statement in_toto.Statement
+ raw, err := e.DecodeB64Payload()
+ if err != nil {
+ return nil, ErrDecodingB64
+ }
+ err = protojson.Unmarshal(raw, &statement)
+ if err != nil {
+ return nil, ErrDecodingJSON
+ }
+ return &statement, nil
+}
+
+func (e *Envelope) EnvelopeContent() verify.EnvelopeContent {
+ return e
+}
+
+func (e *Envelope) RawEnvelope() *dsse.Envelope {
+ return e.Envelope
+}
+
+func (m *MessageSignature) EnvelopeContent() verify.EnvelopeContent {
+ return nil
+}
+
+func (e *Envelope) MessageSignatureContent() verify.MessageSignatureContent {
+ return nil
+}
+
+func (m *MessageSignature) MessageSignatureContent() verify.MessageSignatureContent {
+ return m
+}
+
+func (m *MessageSignature) Signature() []byte {
+ return m.signature
+}
+
+func (e *Envelope) Signature() []byte {
+ if len(e.Signatures) == 0 {
+ return []byte{}
+ }
+
+ sigBytes, err := base64.StdEncoding.DecodeString(e.Signatures[0].Sig)
+ if err != nil {
+ return []byte{}
+ }
+
+ return sigBytes
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go
new file mode 100644
index 00000000000..86e1e0bbc0c
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/bundle/verification_content.go
@@ -0,0 +1,92 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bundle
+
+import (
+ "crypto"
+ "crypto/x509"
+ "time"
+
+ "github.com/sigstore/sigstore-go/pkg/root"
+ "github.com/sigstore/sigstore-go/pkg/verify"
+)
+
+type Certificate struct {
+ certificate *x509.Certificate
+}
+
+func NewCertificate(cert *x509.Certificate) *Certificate {
+ return &Certificate{certificate: cert}
+}
+
+type PublicKey struct {
+ hint string
+}
+
+func (pk PublicKey) Hint() string {
+ return pk.hint
+}
+
+func (c *Certificate) CompareKey(key any, _ root.TrustedMaterial) bool {
+ x509Key, ok := key.(*x509.Certificate)
+ if !ok {
+ return false
+ }
+
+ return c.certificate.Equal(x509Key)
+}
+
+func (c *Certificate) ValidAtTime(t time.Time, _ root.TrustedMaterial) bool {
+ return !c.certificate.NotAfter.Before(t) && !c.certificate.NotBefore.After(t)
+}
+
+func (c *Certificate) Certificate() *x509.Certificate {
+ return c.certificate
+}
+
+func (c *Certificate) PublicKey() verify.PublicKeyProvider {
+ return nil
+}
+
+func (pk *PublicKey) CompareKey(key any, tm root.TrustedMaterial) bool {
+ verifier, err := tm.PublicKeyVerifier(pk.hint)
+ if err != nil {
+ return false
+ }
+ pubKey, err := verifier.PublicKey()
+ if err != nil {
+ return false
+ }
+ if equaler, ok := key.(interface{ Equal(x crypto.PublicKey) bool }); ok {
+ return equaler.Equal(pubKey)
+ }
+ return false
+}
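+
+// The interface assertion above relies on the Equal(crypto.PublicKey) bool
+// method implemented by standard library key types such as *ecdsa.PublicKey,
+// *rsa.PublicKey and ed25519.PublicKey, so CompareKey returns true only when
+// the trusted material resolves the hint to a verifier whose public key
+// equals the supplied key.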
+
+func (pk *PublicKey) ValidAtTime(t time.Time, tm root.TrustedMaterial) bool {
+ verifier, err := tm.PublicKeyVerifier(pk.hint)
+ if err != nil {
+ return false
+ }
+ return verifier.ValidAtTime(t)
+}
+
+func (pk *PublicKey) Certificate() *x509.Certificate {
+ return nil
+}
+
+func (pk *PublicKey) PublicKey() verify.PublicKeyProvider {
+ return pk
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go
new file mode 100644
index 00000000000..e9a0680d162
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/extensions.go
@@ -0,0 +1,239 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a verbatim copy of https://github.com/sigstore/fulcio/blob/3707d80bb25330bc7ffbd9702fb401cd643e36fa/pkg/certificate/extensions.go ,
+// EXCEPT:
+// - the parseExtensions func has been renamed ParseExtensions
+
+package certificate
+
+import (
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+)
+
+var (
+ // Deprecated: Use OIDIssuerV2
+ OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1}
+ // Deprecated: Use OIDBuildTrigger
+ OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2}
+ // Deprecated: Use OIDSourceRepositoryDigest
+ OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3}
+ // Deprecated: Use OIDBuildConfigURI or OIDBuildConfigDigest
+ OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4}
+	// Deprecated: Use OIDSourceRepositoryURI
+ OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5}
+ // Deprecated: Use OIDSourceRepositoryRef
+ OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6}
+
+ OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7}
+ OIDIssuerV2 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8}
+
+ // CI extensions
+ OIDBuildSignerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9}
+ OIDBuildSignerDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10}
+ OIDRunnerEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11}
+ OIDSourceRepositoryURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12}
+ OIDSourceRepositoryDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13}
+ OIDSourceRepositoryRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14}
+ OIDSourceRepositoryIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15}
+ OIDSourceRepositoryOwnerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16}
+ OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17}
+ OIDBuildConfigURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18}
+ OIDBuildConfigDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19}
+ OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
+ OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
+ OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22}
+)
+
+// Extensions contains all custom x509 extensions defined by Fulcio
+type Extensions struct {
+ // NB: New extensions must be added here and documented
+ // at docs/oidc-info.md
+
+	// The OIDC issuer. Should match the `iss` claim of the ID token or, in the
+	// case of a federated login like Dex, it should match the issuer URL of the
+	// upstream issuer. If the issuer is not set, the extensions are invalid and
+	// will fail to render.
+ Issuer string `json:"issuer,omitempty"` // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated)
+
+ // Deprecated
+ // Triggering event of the Github Workflow. Matches the `event_name` claim of ID
+ // tokens from Github Actions
+ GithubWorkflowTrigger string `json:"githubWorkflowTrigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2
+
+ // Deprecated
+ // SHA of git commit being built in Github Actions. Matches the `sha` claim of ID
+ // tokens from Github Actions
+ GithubWorkflowSHA string `json:"githubWorkflowSHA,omitempty"` //nolint:tagliatelle // OID 1.3.6.1.4.1.57264.1.3
+
+ // Deprecated
+ // Name of Github Actions Workflow. Matches the `workflow` claim of the ID
+ // tokens from Github Actions
+ GithubWorkflowName string `json:"githubWorkflowName,omitempty"` // OID 1.3.6.1.4.1.57264.1.4
+
+ // Deprecated
+ // Repository of the Github Actions Workflow. Matches the `repository` claim of the ID
+ // tokens from Github Actions
+ GithubWorkflowRepository string `json:"githubWorkflowRepository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5
+
+ // Deprecated
+ // Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens
+ // from Github Actions
+ GithubWorkflowRef string `json:"githubWorkflowRef,omitempty"` // 1.3.6.1.4.1.57264.1.6
+
+ // Reference to specific build instructions that are responsible for signing.
+ BuildSignerURI string `json:"buildSignerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.9
+
+ // Immutable reference to the specific version of the build instructions that is responsible for signing.
+ BuildSignerDigest string `json:"buildSignerDigest,omitempty"` // 1.3.6.1.4.1.57264.1.10
+
+ // Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure.
+ RunnerEnvironment string `json:"runnerEnvironment,omitempty"` // 1.3.6.1.4.1.57264.1.11
+
+ // Source repository URL that the build was based on.
+ SourceRepositoryURI string `json:"sourceRepositoryURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.12
+
+ // Immutable reference to a specific version of the source code that the build was based upon.
+ SourceRepositoryDigest string `json:"sourceRepositoryDigest,omitempty"` // 1.3.6.1.4.1.57264.1.13
+
+ // Source Repository Ref that the build run was based upon.
+ SourceRepositoryRef string `json:"sourceRepositoryRef,omitempty"` // 1.3.6.1.4.1.57264.1.14
+
+ // Immutable identifier for the source repository the workflow was based upon.
+ SourceRepositoryIdentifier string `json:"sourceRepositoryIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.15
+
+ // Source repository owner URL of the owner of the source repository that the build was based on.
+ SourceRepositoryOwnerURI string `json:"sourceRepositoryOwnerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.16
+
+ // Immutable identifier for the owner of the source repository that the workflow was based upon.
+ SourceRepositoryOwnerIdentifier string `json:"sourceRepositoryOwnerIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.17
+
+ // Build Config URL to the top-level/initiating build instructions.
+ BuildConfigURI string `json:"buildConfigURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.18
+
+ // Immutable reference to the specific version of the top-level/initiating build instructions.
+ BuildConfigDigest string `json:"buildConfigDigest,omitempty"` // 1.3.6.1.4.1.57264.1.19
+
+ // Event or action that initiated the build.
+ BuildTrigger string `json:"buildTrigger,omitempty"` // 1.3.6.1.4.1.57264.1.20
+
+ // Run Invocation URL to uniquely identify the build execution.
+ RunInvocationURI string `json:"runInvocationURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.21
+
+ // Source repository visibility at the time of signing the certificate.
+ SourceRepositoryVisibilityAtSigning string `json:"sourceRepositoryVisibilityAtSigning,omitempty"` // 1.3.6.1.4.1.57264.1.22
+}
+
+func ParseExtensions(ext []pkix.Extension) (Extensions, error) {
+ out := Extensions{}
+
+ for _, e := range ext {
+ switch {
+ // BEGIN: Deprecated
+ case e.Id.Equal(OIDIssuer):
+ out.Issuer = string(e.Value)
+ case e.Id.Equal(OIDGitHubWorkflowTrigger):
+ out.GithubWorkflowTrigger = string(e.Value)
+ case e.Id.Equal(OIDGitHubWorkflowSHA):
+ out.GithubWorkflowSHA = string(e.Value)
+ case e.Id.Equal(OIDGitHubWorkflowName):
+ out.GithubWorkflowName = string(e.Value)
+ case e.Id.Equal(OIDGitHubWorkflowRepository):
+ out.GithubWorkflowRepository = string(e.Value)
+ case e.Id.Equal(OIDGitHubWorkflowRef):
+ out.GithubWorkflowRef = string(e.Value)
+ // END: Deprecated
+ case e.Id.Equal(OIDIssuerV2):
+ if err := ParseDERString(e.Value, &out.Issuer); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDBuildSignerURI):
+ if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDBuildSignerDigest):
+ if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDRunnerEnvironment):
+ if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDSourceRepositoryURI):
+ if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDSourceRepositoryDigest):
+ if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDSourceRepositoryRef):
+ if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDSourceRepositoryIdentifier):
+ if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDSourceRepositoryOwnerURI):
+ if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier):
+ if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDBuildConfigURI):
+ if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDBuildConfigDigest):
+ if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDBuildTrigger):
+ if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDRunInvocationURI):
+ if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil {
+ return Extensions{}, err
+ }
+ case e.Id.Equal(OIDSourceRepositoryVisibilityAtSigning):
+ if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil {
+ return Extensions{}, err
+ }
+ }
+ }
+
+	// We only ever return a nil error here, but the error return is kept so
+	// that more complex field parsing can be added in a backwards-compatible
+	// way if needed.
+ return out, nil
+}
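+
+// Typical entry point (sketch): given a parsed Fulcio leaf certificate,
+//
+//	exts, err := ParseExtensions(cert.Extensions)
+//	if err == nil {
+//		fmt.Println(exts.Issuer, exts.BuildTrigger)
+//	}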
+
+// ParseDERString decodes a DER-encoded string and puts the value in parsedVal.
+// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding.
+func ParseDERString(val []byte, parsedVal *string) error {
+ rest, err := asn1.Unmarshal(val, parsedVal)
+ if err != nil {
+ return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %w", err)
+ }
+ if len(rest) != 0 {
+ return errors.New("unexpected trailing bytes in DER-encoded string")
+ }
+ return nil
+}
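+
+// Round-trip sketch (assumes encoding/asn1; Marshal of a printable Go string
+// yields a DER PrintableString, which Unmarshal accepts):
+//
+//	der, _ := asn1.Marshal("push")
+//	var trigger string
+//	_ = ParseDERString(der, &trigger) // trigger == "push"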
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go
new file mode 100644
index 00000000000..da9a6661a90
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/fulcio/certificate/summarize.go
@@ -0,0 +1,90 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package certificate
+
+import (
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+type Summary struct {
+ CertificateIssuer string `json:"certificateIssuer"`
+ SubjectAlternativeName string `json:"subjectAlternativeName"`
+ Extensions
+}
+
+type ErrCompareExtensions struct {
+ field string
+ expected string
+ actual string
+}
+
+func (e *ErrCompareExtensions) Error() string {
+ return fmt.Sprintf("expected %s to be \"%s\", got \"%s\"", e.field, e.expected, e.actual)
+}
+
+func SummarizeCertificate(cert *x509.Certificate) (Summary, error) {
+ extensions, err := ParseExtensions(cert.Extensions)
+
+ if err != nil {
+ return Summary{}, err
+ }
+
+ var san string
+
+ switch {
+ case len(cert.URIs) > 0:
+ san = cert.URIs[0].String()
+ case len(cert.EmailAddresses) > 0:
+ san = cert.EmailAddresses[0]
+ }
+ if san == "" {
+ san, _ = cryptoutils.UnmarshalOtherNameSAN(cert.Extensions)
+ }
+ if san == "" {
+ return Summary{}, errors.New("no Subject Alternative Name found")
+ }
+
+ return Summary{CertificateIssuer: cert.Issuer.String(), SubjectAlternativeName: san, Extensions: extensions}, nil
+}
+
+// CompareExtensions compares two Extensions structs and returns an error if
+// any field set in the expected struct is not equal to the corresponding
+// actual field. Empty fields in the expectedExt struct are ignored.
+func CompareExtensions(expectedExt, actualExt Extensions) error {
+ expExtValue := reflect.ValueOf(expectedExt)
+ actExtValue := reflect.ValueOf(actualExt)
+
+ fields := reflect.VisibleFields(expExtValue.Type())
+ for _, field := range fields {
+ expectedFieldVal := expExtValue.FieldByName(field.Name)
+
+ // if the expected field is empty, skip it
+ if expectedFieldVal.IsValid() && !expectedFieldVal.IsZero() {
+ actualFieldVal := actExtValue.FieldByName(field.Name)
+ if actualFieldVal.IsValid() {
+ if expectedFieldVal.Interface() != actualFieldVal.Interface() {
+ return &ErrCompareExtensions{field.Name, fmt.Sprintf("%v", expectedFieldVal.Interface()), fmt.Sprintf("%v", actualFieldVal.Interface())}
+ }
+ }
+ }
+ }
+
+ return nil
+}
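+
+// Illustrative policy check (sketch; the issuer URL is a placeholder): only
+// fields set on the expected side are compared, so
+//
+//	err := CompareExtensions(Extensions{Issuer: "https://accounts.example.com"}, actual)
+//
+// fails only if actual.Issuer differs; every other field of actual is ignored.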
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go
new file mode 100644
index 00000000000..5e1cb67cca2
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/certificate_authority.go
@@ -0,0 +1,66 @@
+// Copyright 2024 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "crypto/x509"
+ "errors"
+ "time"
+)
+
+type CertificateAuthority interface {
+ Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error)
+}
+
+type FulcioCertificateAuthority struct {
+ Root *x509.Certificate
+ Intermediates []*x509.Certificate
+ ValidityPeriodStart time.Time
+ ValidityPeriodEnd time.Time
+ URI string
+}
+
+var _ CertificateAuthority = &FulcioCertificateAuthority{}
+
+func (ca *FulcioCertificateAuthority) Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error) {
+ if !ca.ValidityPeriodStart.IsZero() && observerTimestamp.Before(ca.ValidityPeriodStart) {
+ return nil, errors.New("certificate is not valid yet")
+ }
+ if !ca.ValidityPeriodEnd.IsZero() && observerTimestamp.After(ca.ValidityPeriodEnd) {
+ return nil, errors.New("certificate is no longer valid")
+ }
+
+ rootCertPool := x509.NewCertPool()
+ rootCertPool.AddCert(ca.Root)
+ intermediateCertPool := x509.NewCertPool()
+ for _, cert := range ca.Intermediates {
+ intermediateCertPool.AddCert(cert)
+ }
+
+ // From spec:
+ // > ## Certificate
+ // > For a signature with a given certificate to be considered valid, it must have a timestamp while every certificate in the chain up to the root is valid (the so-called “hybrid model” of certificate verification per Braun et al. (2013)).
+
+ opts := x509.VerifyOptions{
+ CurrentTime: observerTimestamp,
+ Roots: rootCertPool,
+ Intermediates: intermediateCertPool,
+ KeyUsages: []x509.ExtKeyUsage{
+ x509.ExtKeyUsageCodeSigning,
+ },
+ }
+
+ return cert.Verify(opts)
+}
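+
+// Usage sketch (the observer timestamp would typically come from an SCT or a
+// trusted timestamp, which is an assumption of this example):
+//
+//	chains, err := ca.Verify(leafCert, observerTime)
+//
+// Both the CA validity window and the x509 chain are evaluated at the same
+// observer timestamp, per the "hybrid model" quoted above.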
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go
new file mode 100644
index 00000000000..a86fe0b2c36
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/signing_config.go
@@ -0,0 +1,457 @@
+// Copyright 2024 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "fmt"
+ "math/rand"
+ "os"
+ "slices"
+ "time"
+
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1"
+ "github.com/sigstore/sigstore-go/pkg/tuf"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const SigningConfigMediaType02 = "application/vnd.dev.sigstore.signingconfig.v0.2+json"
+
+type SigningConfig struct {
+ signingConfig *prototrustroot.SigningConfig
+}
+
+type Service struct {
+ URL string
+ MajorAPIVersion uint32
+ ValidityPeriodStart time.Time
+ ValidityPeriodEnd time.Time
+ Operator string
+}
+
+type ServiceConfiguration struct {
+ Selector prototrustroot.ServiceSelector
+ Count uint32
+}
+
+func NewService(s *prototrustroot.Service) Service {
+ validFor := s.GetValidFor()
+
+ var start time.Time
+ if validFor.GetStart() != nil {
+ start = validFor.GetStart().AsTime()
+ }
+
+ var end time.Time
+ if validFor.GetEnd() != nil {
+ end = validFor.GetEnd().AsTime()
+ }
+
+ return Service{
+ URL: s.GetUrl(),
+ MajorAPIVersion: s.GetMajorApiVersion(),
+ ValidityPeriodStart: start,
+ ValidityPeriodEnd: end,
+ Operator: s.GetOperator(),
+ }
+}
+
+// SelectService returns which service endpoint should be used based on supported API versions
+// and current time. It will select the first service with the highest API version that matches
+// the criteria. Services should be sorted from newest to oldest validity period start time, to
+// minimize how far clients need to search to find a matching service.
+func SelectService(services []Service, supportedAPIVersions []uint32, currentTime time.Time) (Service, error) {
+ if len(supportedAPIVersions) == 0 {
+ return Service{}, fmt.Errorf("no supported API versions")
+ }
+
+ // Order supported versions from highest to lowest
+ sortedVersions := make([]uint32, len(supportedAPIVersions))
+ copy(sortedVersions, supportedAPIVersions)
+ slices.Sort(sortedVersions)
+ slices.Reverse(sortedVersions)
+
+ // Order services from newest to oldest
+ sortedServices := make([]Service, len(services))
+ copy(sortedServices, services)
+ slices.SortFunc(sortedServices, func(i, j Service) int {
+ return i.ValidityPeriodStart.Compare(j.ValidityPeriodStart)
+ })
+ slices.Reverse(sortedServices)
+
+ for _, version := range sortedVersions {
+ for _, s := range sortedServices {
+ if version == s.MajorAPIVersion && s.ValidAtTime(currentTime) {
+ return s, nil
+ }
+ }
+ }
+
+ return Service{}, fmt.Errorf("no matching service found for API versions %v and current time %v", supportedAPIVersions, currentTime)
+}
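+
+// Illustrative call (sketch; the version list and clock are example inputs):
+//
+//	svc, err := SelectService(services, []uint32{2, 1}, time.Now())
+//	if err == nil {
+//		fmt.Println(svc.URL) // newest currently-valid v2 service, falling back to v1
+//	}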
+
+// SelectServices returns which service endpoints should be used based on supported API versions
+// and current time. It will use the configuration's selector to pick a set of services.
+// ALL will return all service endpoints, ANY will return a random endpoint, and
+// EXACT will return a random selection of a specified number of endpoints.
+// It will select services from the highest supported API versions and will not select
+// services from different API versions. It will select distinct service operators, selecting
+// at most one service per operator.
+func SelectServices(services []Service, config ServiceConfiguration, supportedAPIVersions []uint32, currentTime time.Time) ([]Service, error) {
+ if len(supportedAPIVersions) == 0 {
+ return nil, fmt.Errorf("no supported API versions")
+ }
+
+ // Order supported versions from highest to lowest
+ sortedVersions := make([]uint32, len(supportedAPIVersions))
+ copy(sortedVersions, supportedAPIVersions)
+ slices.Sort(sortedVersions)
+ slices.Reverse(sortedVersions)
+
+ // Order services from newest to oldest
+ sortedServices := make([]Service, len(services))
+ copy(sortedServices, services)
+ slices.SortFunc(sortedServices, func(i, j Service) int {
+ return i.ValidityPeriodStart.Compare(j.ValidityPeriodStart)
+ })
+ slices.Reverse(sortedServices)
+
+ operators := make(map[string]bool)
+ var selectedServices []Service
+ for _, version := range sortedVersions {
+ for _, s := range sortedServices {
+ if version == s.MajorAPIVersion && s.ValidAtTime(currentTime) {
+ // Select the newest service for a given operator
+ if !operators[s.Operator] {
+ operators[s.Operator] = true
+ selectedServices = append(selectedServices, s)
+ }
+ }
+ }
+ // Exit once a list of services is found
+ if len(selectedServices) != 0 {
+ break
+ }
+ }
+
+ if len(selectedServices) == 0 {
+ return nil, fmt.Errorf("no matching services found for API versions %v and current time %v", supportedAPIVersions, currentTime)
+ }
+
+ // Select services from the highest supported API version
+ switch config.Selector {
+ case prototrustroot.ServiceSelector_ALL:
+ return selectedServices, nil
+ case prototrustroot.ServiceSelector_ANY:
+ i := rand.Intn(len(selectedServices)) // #nosec G404
+ return []Service{selectedServices[i]}, nil
+ case prototrustroot.ServiceSelector_EXACT:
+		matchedServices, err := selectExact(selectedServices, config.Count)
+		if err != nil {
+			return nil, err
+		}
+		return matchedServices, nil
+ default:
+ return nil, fmt.Errorf("invalid service selector")
+ }
+}
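+
+// For example (sketch), to pick two currently-valid services run by distinct
+// operators:
+//
+//	cfg := ServiceConfiguration{Selector: prototrustroot.ServiceSelector_EXACT, Count: 2}
+//	svcs, err := SelectServices(services, cfg, []uint32{1}, time.Now())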
+
+func selectExact[T any](slice []T, count uint32) ([]T, error) {
+ if count == 0 {
+ return nil, fmt.Errorf("service selector count must be greater than 0")
+ }
+ if int(count) > len(slice) {
+ return nil, fmt.Errorf("service selector count %d must be less than or equal to the slice length %d", count, len(slice))
+ }
+ sliceCopy := make([]T, len(slice))
+ copy(sliceCopy, slice)
+ var result []T
+ for range count {
+ i := rand.Intn(len(sliceCopy)) // #nosec G404
+ result = append(result, sliceCopy[i])
+ // Remove element from slice
+ sliceCopy[i], sliceCopy[len(sliceCopy)-1] = sliceCopy[len(sliceCopy)-1], sliceCopy[i]
+ sliceCopy = sliceCopy[:len(sliceCopy)-1]
+ }
+ return result, nil
+}
+
+func mapFunc[T, V any](ts []T, fn func(T) V) []V {
+ result := make([]V, len(ts))
+ for i, t := range ts {
+ result[i] = fn(t)
+ }
+ return result
+}
+
+func (s Service) ValidAtTime(t time.Time) bool {
+ if !s.ValidityPeriodStart.IsZero() && t.Before(s.ValidityPeriodStart) {
+ return false
+ }
+ if !s.ValidityPeriodEnd.IsZero() && t.After(s.ValidityPeriodEnd) {
+ return false
+ }
+ return true
+}
+
+func (s Service) ToServiceProtobuf() *prototrustroot.Service {
+ tr := &v1.TimeRange{
+ Start: timestamppb.New(s.ValidityPeriodStart),
+ }
+ if !s.ValidityPeriodEnd.IsZero() {
+ tr.End = timestamppb.New(s.ValidityPeriodEnd)
+ }
+
+ return &prototrustroot.Service{
+ Url: s.URL,
+ MajorApiVersion: s.MajorAPIVersion,
+ ValidFor: tr,
+ Operator: s.Operator,
+ }
+}
+
+func (sc ServiceConfiguration) ToConfigProtobuf() *prototrustroot.ServiceConfiguration {
+ return &prototrustroot.ServiceConfiguration{
+ Selector: sc.Selector,
+ Count: sc.Count,
+ }
+}
+
+func (sc *SigningConfig) FulcioCertificateAuthorityURLs() []Service {
+ var services []Service
+
+ for _, s := range sc.signingConfig.GetCaUrls() {
+ services = append(services, NewService(s))
+ }
+ return services
+}
+
+func (sc *SigningConfig) OIDCProviderURLs() []Service {
+ var services []Service
+ for _, s := range sc.signingConfig.GetOidcUrls() {
+ services = append(services, NewService(s))
+ }
+ return services
+}
+
+func (sc *SigningConfig) RekorLogURLs() []Service {
+ var services []Service
+ for _, s := range sc.signingConfig.GetRekorTlogUrls() {
+ services = append(services, NewService(s))
+ }
+ return services
+}
+
+func (sc *SigningConfig) RekorLogURLsConfig() ServiceConfiguration {
+ c := sc.signingConfig.GetRekorTlogConfig()
+ return ServiceConfiguration{
+ Selector: c.Selector,
+ Count: c.Count,
+ }
+}
+
+func (sc *SigningConfig) TimestampAuthorityURLs() []Service {
+ var services []Service
+ for _, s := range sc.signingConfig.GetTsaUrls() {
+ services = append(services, NewService(s))
+ }
+ return services
+}
+
+func (sc *SigningConfig) TimestampAuthorityURLsConfig() ServiceConfiguration {
+ c := sc.signingConfig.GetTsaConfig()
+ return ServiceConfiguration{
+ Selector: c.Selector,
+ Count: c.Count,
+ }
+}
+
+func (sc *SigningConfig) WithFulcioCertificateAuthorityURLs(fulcioURLs ...Service) *SigningConfig {
+ var services []*prototrustroot.Service
+ for _, u := range fulcioURLs {
+ services = append(services, u.ToServiceProtobuf())
+ }
+ sc.signingConfig.CaUrls = services
+ return sc
+}
+
+func (sc *SigningConfig) AddFulcioCertificateAuthorityURLs(fulcioURLs ...Service) *SigningConfig {
+ for _, u := range fulcioURLs {
+ sc.signingConfig.CaUrls = append(sc.signingConfig.CaUrls, u.ToServiceProtobuf())
+ }
+ return sc
+}
+
+func (sc *SigningConfig) WithOIDCProviderURLs(oidcURLs ...Service) *SigningConfig {
+ var services []*prototrustroot.Service
+ for _, u := range oidcURLs {
+ services = append(services, u.ToServiceProtobuf())
+ }
+ sc.signingConfig.OidcUrls = services
+ return sc
+}
+
+func (sc *SigningConfig) AddOIDCProviderURLs(oidcURLs ...Service) *SigningConfig {
+ for _, u := range oidcURLs {
+ sc.signingConfig.OidcUrls = append(sc.signingConfig.OidcUrls, u.ToServiceProtobuf())
+ }
+ return sc
+}
+
+func (sc *SigningConfig) WithRekorLogURLs(logURLs ...Service) *SigningConfig {
+ var services []*prototrustroot.Service
+ for _, u := range logURLs {
+ services = append(services, u.ToServiceProtobuf())
+ }
+ sc.signingConfig.RekorTlogUrls = services
+ return sc
+}
+
+func (sc *SigningConfig) AddRekorLogURLs(logURLs ...Service) *SigningConfig {
+ for _, u := range logURLs {
+ sc.signingConfig.RekorTlogUrls = append(sc.signingConfig.RekorTlogUrls, u.ToServiceProtobuf())
+ }
+ return sc
+}
+
+func (sc *SigningConfig) WithRekorTlogConfig(selector prototrustroot.ServiceSelector, count uint32) *SigningConfig {
+ sc.signingConfig.RekorTlogConfig.Selector = selector
+ sc.signingConfig.RekorTlogConfig.Count = count
+ return sc
+}
+
+func (sc *SigningConfig) WithTimestampAuthorityURLs(tsaURLs ...Service) *SigningConfig {
+ var services []*prototrustroot.Service
+ for _, u := range tsaURLs {
+ services = append(services, u.ToServiceProtobuf())
+ }
+ sc.signingConfig.TsaUrls = services
+ return sc
+}
+
+func (sc *SigningConfig) AddTimestampAuthorityURLs(tsaURLs ...Service) *SigningConfig {
+ for _, u := range tsaURLs {
+ sc.signingConfig.TsaUrls = append(sc.signingConfig.TsaUrls, u.ToServiceProtobuf())
+ }
+ return sc
+}
+
+func (sc *SigningConfig) WithTsaConfig(selector prototrustroot.ServiceSelector, count uint32) *SigningConfig {
+ sc.signingConfig.TsaConfig.Selector = selector
+ sc.signingConfig.TsaConfig.Count = count
+ return sc
+}
+
+func (sc SigningConfig) String() string {
+ return fmt.Sprintf("{CA: %v, OIDC: %v, RekorLogs: %v, TSAs: %v, MediaType: %s}",
+ sc.FulcioCertificateAuthorityURLs(),
+ sc.OIDCProviderURLs(),
+ sc.RekorLogURLs(),
+ sc.TimestampAuthorityURLs(),
+ SigningConfigMediaType02)
+}
+
+func (sc SigningConfig) MarshalJSON() ([]byte, error) {
+ return protojson.Marshal(sc.signingConfig)
+}
+
+// NewSigningConfig initializes a SigningConfig object from a mediaType string, Fulcio certificate
+// authority URLs, OIDC provider URLs, Rekor transparency log URLs, timestamp authority URLs,
+// and selection criteria for Rekor logs and TSAs.
+func NewSigningConfig(mediaType string,
+ fulcioCertificateAuthorities []Service,
+ oidcProviders []Service,
+ rekorLogs []Service,
+ rekorLogsConfig ServiceConfiguration,
+ timestampAuthorities []Service,
+ timestampAuthoritiesConfig ServiceConfiguration) (*SigningConfig, error) {
+ if mediaType != SigningConfigMediaType02 {
+ return nil, fmt.Errorf("unsupported SigningConfig media type, must be: %s", SigningConfigMediaType02)
+ }
+ sc := &SigningConfig{
+ signingConfig: &prototrustroot.SigningConfig{
+ MediaType: mediaType,
+ CaUrls: mapFunc(fulcioCertificateAuthorities, Service.ToServiceProtobuf),
+ OidcUrls: mapFunc(oidcProviders, Service.ToServiceProtobuf),
+ RekorTlogUrls: mapFunc(rekorLogs, Service.ToServiceProtobuf),
+ RekorTlogConfig: rekorLogsConfig.ToConfigProtobuf(),
+ TsaUrls: mapFunc(timestampAuthorities, Service.ToServiceProtobuf),
+ TsaConfig: timestampAuthoritiesConfig.ToConfigProtobuf(),
+ },
+ }
+ return sc, nil
+}
+
+// NewSigningConfigFromProtobuf returns a Sigstore signing configuration.
+func NewSigningConfigFromProtobuf(sc *prototrustroot.SigningConfig) (*SigningConfig, error) {
+ if sc.GetMediaType() != SigningConfigMediaType02 {
+ return nil, fmt.Errorf("unsupported SigningConfig media type: %s", sc.GetMediaType())
+ }
+ return &SigningConfig{signingConfig: sc}, nil
+}
+
+// NewSigningConfigFromPath returns a Sigstore signing configuration from a file.
+func NewSigningConfigFromPath(path string) (*SigningConfig, error) {
+ scJSON, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewSigningConfigFromJSON(scJSON)
+}
+
+// NewSigningConfigFromJSON returns a Sigstore signing configuration from JSON.
+func NewSigningConfigFromJSON(rootJSON []byte) (*SigningConfig, error) {
+ pbSC, err := NewSigningConfigProtobuf(rootJSON)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewSigningConfigFromProtobuf(pbSC)
+}
+
+// NewSigningConfigProtobuf returns a Sigstore signing configuration as a protobuf.
+func NewSigningConfigProtobuf(scJSON []byte) (*prototrustroot.SigningConfig, error) {
+ pbSC := &prototrustroot.SigningConfig{}
+ err := protojson.Unmarshal(scJSON, pbSC)
+ if err != nil {
+ return nil, err
+ }
+ return pbSC, nil
+}
+
+// FetchSigningConfig fetches the public-good Sigstore signing configuration from TUF.
+func FetchSigningConfig() (*SigningConfig, error) {
+ return FetchSigningConfigWithOptions(tuf.DefaultOptions())
+}
+
+// FetchSigningConfigWithOptions fetches the public-good Sigstore signing configuration with the given options from TUF.
+func FetchSigningConfigWithOptions(opts *tuf.Options) (*SigningConfig, error) {
+ client, err := tuf.New(opts)
+ if err != nil {
+ return nil, err
+ }
+ return GetSigningConfig(client)
+}
+
+// GetSigningConfig fetches the public-good Sigstore signing configuration target from TUF.
+func GetSigningConfig(c *tuf.Client) (*SigningConfig, error) {
+ jsonBytes, err := c.GetTarget("signing_config.v0.2.json")
+ if err != nil {
+ return nil, err
+ }
+ return NewSigningConfigFromJSON(jsonBytes)
+}
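+
+// End-to-end sketch (network access and the default public-good TUF
+// repository are assumptions of this example):
+//
+//	sc, err := FetchSigningConfig()
+//	if err == nil {
+//		fmt.Println(sc.FulcioCertificateAuthorityURLs())
+//	}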
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go
new file mode 100644
index 00000000000..bde952cc116
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/timestamping_authority.go
@@ -0,0 +1,75 @@
+// Copyright 2024 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "bytes"
+ "crypto/x509"
+ "errors"
+ "time"
+
+ tsaverification "github.com/sigstore/timestamp-authority/v2/pkg/verification"
+)
+
+type Timestamp struct {
+ Time time.Time
+ URI string
+}
+
+type TimestampingAuthority interface {
+ Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error)
+}
+
+type SigstoreTimestampingAuthority struct {
+ Root *x509.Certificate
+ Intermediates []*x509.Certificate
+ Leaf *x509.Certificate
+ ValidityPeriodStart time.Time
+ ValidityPeriodEnd time.Time
+ URI string
+}
+
+var _ TimestampingAuthority = &SigstoreTimestampingAuthority{}
+
+func (tsa *SigstoreTimestampingAuthority) Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error) {
+ if tsa.Root == nil {
+ var tsaURIDisplay string
+ if tsa.URI != "" {
+ tsaURIDisplay = tsa.URI + " "
+ }
+ return nil, errors.New("timestamping authority " + tsaURIDisplay + "root certificate is nil")
+ }
+ trustedRootVerificationOptions := tsaverification.VerifyOpts{
+ Roots: []*x509.Certificate{tsa.Root},
+ Intermediates: tsa.Intermediates,
+ TSACertificate: tsa.Leaf,
+ }
+
+ // Ensure timestamp responses are from trusted sources
+ timestamp, err := tsaverification.VerifyTimestampResponse(signedTimestamp, bytes.NewReader(signatureBytes), trustedRootVerificationOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ if !tsa.ValidityPeriodStart.IsZero() && timestamp.Time.Before(tsa.ValidityPeriodStart) {
+ return nil, errors.New("timestamp is before the validity period start")
+ }
+ if !tsa.ValidityPeriodEnd.IsZero() && timestamp.Time.After(tsa.ValidityPeriodEnd) {
+ return nil, errors.New("timestamp is after the validity period end")
+ }
+
+	// All of the above verification succeeded, so return the verified timestamp.
+ return &Timestamp{Time: timestamp.Time, URI: tsa.URI}, nil
+}
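+
+// Usage sketch (signedTimestamp is assumed to be a DER-encoded RFC 3161
+// response covering signatureBytes):
+//
+//	ts, err := tsa.Verify(signedTimestamp, signatureBytes)
+//	if err == nil {
+//		fmt.Println(ts.Time, ts.URI)
+//	}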
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go
new file mode 100644
index 00000000000..d1ec4d46189
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_material.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/sigstore/sigstore/pkg/signature"
+)
+
+type TrustedMaterial interface {
+ TimestampingAuthorities() []TimestampingAuthority
+ FulcioCertificateAuthorities() []CertificateAuthority
+ RekorLogs() map[string]*TransparencyLog
+ CTLogs() map[string]*TransparencyLog
+ PublicKeyVerifier(string) (TimeConstrainedVerifier, error)
+}
+
+type BaseTrustedMaterial struct{}
+
+func (b *BaseTrustedMaterial) TimestampingAuthorities() []TimestampingAuthority {
+ return []TimestampingAuthority{}
+}
+
+func (b *BaseTrustedMaterial) FulcioCertificateAuthorities() []CertificateAuthority {
+ return []CertificateAuthority{}
+}
+
+func (b *BaseTrustedMaterial) RekorLogs() map[string]*TransparencyLog {
+ return map[string]*TransparencyLog{}
+}
+
+func (b *BaseTrustedMaterial) CTLogs() map[string]*TransparencyLog {
+ return map[string]*TransparencyLog{}
+}
+
+func (b *BaseTrustedMaterial) PublicKeyVerifier(_ string) (TimeConstrainedVerifier, error) {
+ return nil, fmt.Errorf("public key verifier not found")
+}
+
+type TrustedMaterialCollection []TrustedMaterial
+
+// Ensure types implement interfaces
+var _ TrustedMaterial = &BaseTrustedMaterial{}
+var _ TrustedMaterial = TrustedMaterialCollection{}
+
+func (tmc TrustedMaterialCollection) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) {
+ for _, tm := range tmc {
+ verifier, err := tm.PublicKeyVerifier(keyID)
+ if err == nil {
+ return verifier, nil
+ }
+ }
+ return nil, fmt.Errorf("public key verifier not found for keyID: %s", keyID)
+}
+
+func (tmc TrustedMaterialCollection) TimestampingAuthorities() []TimestampingAuthority {
+ var timestampingAuthorities []TimestampingAuthority
+ for _, tm := range tmc {
+ timestampingAuthorities = append(timestampingAuthorities, tm.TimestampingAuthorities()...)
+ }
+ return timestampingAuthorities
+}
+
+func (tmc TrustedMaterialCollection) FulcioCertificateAuthorities() []CertificateAuthority {
+ var certAuthorities []CertificateAuthority
+ for _, tm := range tmc {
+ certAuthorities = append(certAuthorities, tm.FulcioCertificateAuthorities()...)
+ }
+ return certAuthorities
+}
+
+func (tmc TrustedMaterialCollection) RekorLogs() map[string]*TransparencyLog {
+ rekorLogs := make(map[string]*TransparencyLog)
+ for _, tm := range tmc {
+ for keyID, tlogVerifier := range tm.RekorLogs() {
+ rekorLogs[keyID] = tlogVerifier
+ }
+ }
+ return rekorLogs
+}
+
+func (tmc TrustedMaterialCollection) CTLogs() map[string]*TransparencyLog {
+ rekorLogs := make(map[string]*TransparencyLog)
+ for _, tm := range tmc {
+ for keyID, tlogVerifier := range tm.CTLogs() {
+ rekorLogs[keyID] = tlogVerifier
+ }
+ }
+ return rekorLogs
+}
+
+type ValidityPeriodChecker interface {
+ ValidAtTime(time.Time) bool
+}
+
+type TimeConstrainedVerifier interface {
+ ValidityPeriodChecker
+ signature.Verifier
+}
+
+type TrustedPublicKeyMaterial struct {
+ BaseTrustedMaterial
+ publicKeyVerifier func(string) (TimeConstrainedVerifier, error)
+}
+
+func (tr *TrustedPublicKeyMaterial) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) {
+ return tr.publicKeyVerifier(keyID)
+}
+
+func NewTrustedPublicKeyMaterial(publicKeyVerifier func(string) (TimeConstrainedVerifier, error)) *TrustedPublicKeyMaterial {
+ return &TrustedPublicKeyMaterial{
+ publicKeyVerifier: publicKeyVerifier,
+ }
+}
+
+// ExpiringKey is a TimeConstrainedVerifier with a static validity period.
+type ExpiringKey struct {
+ signature.Verifier
+ validityPeriodStart time.Time
+ validityPeriodEnd time.Time
+}
+
+var _ TimeConstrainedVerifier = &ExpiringKey{}
+
+// ValidAtTime returns true if the key is valid at the given time. If the
+// validity period start time is not set, the key is considered valid for all
+// times before the end time. Likewise, if the validity period end time is not
+// set, the key is considered valid for all times after the start time.
+func (k *ExpiringKey) ValidAtTime(t time.Time) bool {
+ if !k.validityPeriodStart.IsZero() && t.Before(k.validityPeriodStart) {
+ return false
+ }
+ if !k.validityPeriodEnd.IsZero() && t.After(k.validityPeriodEnd) {
+ return false
+ }
+ return true
+}
+
+// NewExpiringKey returns a new ExpiringKey with the given validity period
+func NewExpiringKey(verifier signature.Verifier, validityPeriodStart, validityPeriodEnd time.Time) *ExpiringKey {
+ return &ExpiringKey{
+ Verifier: verifier,
+ validityPeriodStart: validityPeriodStart,
+ validityPeriodEnd: validityPeriodEnd,
+ }
+}
+
+// NewTrustedPublicKeyMaterialFromMapping returns a TrustedPublicKeyMaterial from a map of key IDs to
+// ExpiringKeys.
+func NewTrustedPublicKeyMaterialFromMapping(trustedPublicKeys map[string]*ExpiringKey) *TrustedPublicKeyMaterial {
+ return NewTrustedPublicKeyMaterial(func(keyID string) (TimeConstrainedVerifier, error) {
+ expiringKey, ok := trustedPublicKeys[keyID]
+ if !ok {
+ return nil, fmt.Errorf("public key not found for keyID: %s", keyID)
+ }
+ return expiringKey, nil
+ })
+}
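+
+// Putting the pieces together (sketch; verifier construction is elided and
+// "my-key-id" is a placeholder):
+//
+//	keys := map[string]*ExpiringKey{
+//		"my-key-id": NewExpiringKey(verifier, start, time.Time{}), // zero end time: no expiry
+//	}
+//	tm := NewTrustedPublicKeyMaterialFromMapping(keys)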
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go
new file mode 100644
index 00000000000..0bd69d6f148
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root.go
@@ -0,0 +1,556 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/hex"
+ "fmt"
+ "log"
+ "os"
+ "sync"
+ "time"
+
+ protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1"
+ "github.com/sigstore/sigstore-go/pkg/tuf"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+const TrustedRootMediaType01 = "application/vnd.dev.sigstore.trustedroot+json;version=0.1"
+
+type TrustedRoot struct {
+ BaseTrustedMaterial
+ trustedRoot *prototrustroot.TrustedRoot
+ rekorLogs map[string]*TransparencyLog
+ certificateAuthorities []CertificateAuthority
+ ctLogs map[string]*TransparencyLog
+ timestampingAuthorities []TimestampingAuthority
+}
+
+type TransparencyLog struct {
+ BaseURL string
+ ID []byte
+ ValidityPeriodStart time.Time
+ ValidityPeriodEnd time.Time
+ // This is the hash algorithm used by the Merkle tree
+ HashFunc crypto.Hash
+ PublicKey crypto.PublicKey
+ // The hash algorithm used during signature creation
+ SignatureHashFunc crypto.Hash
+}
+
+const (
+ defaultTrustedRoot = "trusted_root.json"
+)
+
+func (tr *TrustedRoot) TimestampingAuthorities() []TimestampingAuthority {
+ return tr.timestampingAuthorities
+}
+
+func (tr *TrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority {
+ return tr.certificateAuthorities
+}
+
+func (tr *TrustedRoot) RekorLogs() map[string]*TransparencyLog {
+ return tr.rekorLogs
+}
+
+func (tr *TrustedRoot) CTLogs() map[string]*TransparencyLog {
+ return tr.ctLogs
+}
+
+func (tr *TrustedRoot) MarshalJSON() ([]byte, error) {
+ err := tr.constructProtoTrustRoot()
+ if err != nil {
+ return nil, fmt.Errorf("failed constructing protobuf TrustRoot representation: %w", err)
+ }
+
+ return protojson.Marshal(tr.trustedRoot)
+}
+
+func NewTrustedRootFromProtobuf(protobufTrustedRoot *prototrustroot.TrustedRoot) (trustedRoot *TrustedRoot, err error) {
+ if protobufTrustedRoot.GetMediaType() != TrustedRootMediaType01 {
+ return nil, fmt.Errorf("unsupported TrustedRoot media type: %s", protobufTrustedRoot.GetMediaType())
+ }
+
+ trustedRoot = &TrustedRoot{trustedRoot: protobufTrustedRoot}
+ trustedRoot.rekorLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetTlogs())
+ if err != nil {
+ return nil, err
+ }
+
+ trustedRoot.certificateAuthorities, err = ParseCertificateAuthorities(protobufTrustedRoot.GetCertificateAuthorities())
+ if err != nil {
+ return nil, err
+ }
+
+ trustedRoot.timestampingAuthorities, err = ParseTimestampingAuthorities(protobufTrustedRoot.GetTimestampAuthorities())
+ if err != nil {
+ return nil, err
+ }
+
+ trustedRoot.ctLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetCtlogs())
+ if err != nil {
+ return nil, err
+ }
+
+ return trustedRoot, nil
+}
+
+func ParseTransparencyLogs(tlogs []*prototrustroot.TransparencyLogInstance) (transparencyLogs map[string]*TransparencyLog, err error) {
+ transparencyLogs = make(map[string]*TransparencyLog)
+ for _, tlog := range tlogs {
+ if tlog.GetHashAlgorithm() != protocommon.HashAlgorithm_SHA2_256 {
+ return nil, fmt.Errorf("unsupported tlog hash algorithm: %s", tlog.GetHashAlgorithm())
+ }
+ if tlog.GetLogId() == nil {
+ return nil, fmt.Errorf("tlog missing log ID")
+ }
+ if tlog.GetLogId().GetKeyId() == nil {
+ return nil, fmt.Errorf("tlog missing log ID key ID")
+ }
+ encodedKeyID := hex.EncodeToString(tlog.GetLogId().GetKeyId())
+
+ if tlog.GetPublicKey() == nil {
+ return nil, fmt.Errorf("tlog missing public key")
+ }
+ if tlog.GetPublicKey().GetRawBytes() == nil {
+ return nil, fmt.Errorf("tlog missing public key raw bytes")
+ }
+
+ var hashFunc crypto.Hash
+ switch tlog.GetHashAlgorithm() {
+ case protocommon.HashAlgorithm_SHA2_256:
+ hashFunc = crypto.SHA256
+ default:
+ return nil, fmt.Errorf("unsupported hash function for the tlog")
+ }
+
+ tlogEntry := &TransparencyLog{
+ BaseURL: tlog.GetBaseUrl(),
+ ID: tlog.GetLogId().GetKeyId(),
+ HashFunc: hashFunc,
+ SignatureHashFunc: crypto.SHA256,
+ }
+
+ switch tlog.GetPublicKey().GetKeyDetails() {
+ case protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256,
+ protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384,
+ protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512:
+ key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
+ tlog.GetBaseUrl(),
+ err,
+ )
+ }
+ var ecKey *ecdsa.PublicKey
+ var ok bool
+ if ecKey, ok = key.(*ecdsa.PublicKey); !ok {
+ return nil, fmt.Errorf("tlog public key is not ECDSA: %s", tlog.GetPublicKey().GetKeyDetails())
+ }
+ tlogEntry.PublicKey = ecKey
+ // This key format has public key in PKIX RSA format and PKCS1#1v1.5 or RSASSA-PSS signature
+ case protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256,
+ protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256,
+ protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256:
+ key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
+ tlog.GetBaseUrl(),
+ err,
+ )
+ }
+ var rsaKey *rsa.PublicKey
+ var ok bool
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return nil, fmt.Errorf("tlog public key is not RSA: %s", tlog.GetPublicKey().GetKeyDetails())
+ }
+ tlogEntry.PublicKey = rsaKey
+ case protocommon.PublicKeyDetails_PKIX_ED25519:
+ key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
+ tlog.GetBaseUrl(),
+ err,
+ )
+ }
+ var edKey ed25519.PublicKey
+ var ok bool
+ if edKey, ok = key.(ed25519.PublicKey); !ok {
+ return nil, fmt.Errorf("tlog public key is not RSA: %s", tlog.GetPublicKey().GetKeyDetails())
+ }
+ tlogEntry.PublicKey = edKey
+ // This key format is deprecated, but currently in use for Sigstore staging instance
+ case protocommon.PublicKeyDetails_PKCS1_RSA_PKCS1V5: //nolint:staticcheck
+ key, err := x509.ParsePKCS1PublicKey(tlog.GetPublicKey().GetRawBytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
+ tlog.GetBaseUrl(),
+ err,
+ )
+ }
+ tlogEntry.PublicKey = key
+ default:
+ return nil, fmt.Errorf("unsupported tlog public key type: %s", tlog.GetPublicKey().GetKeyDetails())
+ }
+
+ tlogEntry.SignatureHashFunc = getSignatureHashAlgo(tlogEntry.PublicKey)
+ transparencyLogs[encodedKeyID] = tlogEntry
+
+ if validFor := tlog.GetPublicKey().GetValidFor(); validFor != nil {
+ if validFor.GetStart() != nil {
+ transparencyLogs[encodedKeyID].ValidityPeriodStart = validFor.GetStart().AsTime()
+ } else {
+ return nil, fmt.Errorf("tlog missing public key validity period start time")
+ }
+ if validFor.GetEnd() != nil {
+ transparencyLogs[encodedKeyID].ValidityPeriodEnd = validFor.GetEnd().AsTime()
+ }
+ } else {
+ return nil, fmt.Errorf("tlog missing public key validity period")
+ }
+ }
+ return transparencyLogs, nil
+}
+
+func ParseCertificateAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) (certificateAuthorities []CertificateAuthority, err error) {
+ certificateAuthorities = make([]CertificateAuthority, len(certAuthorities))
+ for i, certAuthority := range certAuthorities {
+ certificateAuthority, err := ParseCertificateAuthority(certAuthority)
+ if err != nil {
+ return nil, err
+ }
+ certificateAuthorities[i] = certificateAuthority
+ }
+ return certificateAuthorities, nil
+}
+
+func ParseCertificateAuthority(certAuthority *prototrustroot.CertificateAuthority) (*FulcioCertificateAuthority, error) {
+ if certAuthority == nil {
+ return nil, fmt.Errorf("CertificateAuthority is nil")
+ }
+ certChain := certAuthority.GetCertChain()
+ if certChain == nil {
+ return nil, fmt.Errorf("CertificateAuthority missing cert chain")
+ }
+ chainLen := len(certChain.GetCertificates())
+ if chainLen < 1 {
+ return nil, fmt.Errorf("CertificateAuthority cert chain is empty")
+ }
+
+ certificateAuthority := &FulcioCertificateAuthority{
+ URI: certAuthority.Uri,
+ }
+ for i, cert := range certChain.GetCertificates() {
+ parsedCert, err := x509.ParseCertificate(cert.RawBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate for %s %w",
+ certAuthority.Uri,
+ err,
+ )
+ }
+ if i < chainLen-1 {
+ certificateAuthority.Intermediates = append(certificateAuthority.Intermediates, parsedCert)
+ } else {
+ certificateAuthority.Root = parsedCert
+ }
+ }
+ validFor := certAuthority.GetValidFor()
+ if validFor != nil {
+ start := validFor.GetStart()
+ if start != nil {
+ certificateAuthority.ValidityPeriodStart = start.AsTime()
+ }
+ end := validFor.GetEnd()
+ if end != nil {
+ certificateAuthority.ValidityPeriodEnd = end.AsTime()
+ }
+ }
+
+ certificateAuthority.URI = certAuthority.Uri
+
+ return certificateAuthority, nil
+}
+
+func ParseTimestampingAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) (timestampingAuthorities []TimestampingAuthority, err error) {
+ timestampingAuthorities = make([]TimestampingAuthority, len(certAuthorities))
+ for i, certAuthority := range certAuthorities {
+ timestampingAuthority, err := ParseTimestampingAuthority(certAuthority)
+ if err != nil {
+ return nil, err
+ }
+ timestampingAuthorities[i] = timestampingAuthority
+ }
+ return timestampingAuthorities, nil
+}
+
+func ParseTimestampingAuthority(certAuthority *prototrustroot.CertificateAuthority) (TimestampingAuthority, error) {
+ if certAuthority == nil {
+ return nil, fmt.Errorf("CertificateAuthority is nil")
+ }
+ certChain := certAuthority.GetCertChain()
+ if certChain == nil {
+ return nil, fmt.Errorf("CertificateAuthority missing cert chain")
+ }
+ chainLen := len(certChain.GetCertificates())
+ if chainLen < 1 {
+ return nil, fmt.Errorf("CertificateAuthority cert chain is empty")
+ }
+
+ timestampingAuthority := &SigstoreTimestampingAuthority{
+ URI: certAuthority.Uri,
+ }
+ for i, cert := range certChain.GetCertificates() {
+ parsedCert, err := x509.ParseCertificate(cert.RawBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate for %s %w",
+ certAuthority.Uri,
+ err,
+ )
+ }
+ switch {
+ case i == 0 && !parsedCert.IsCA:
+ timestampingAuthority.Leaf = parsedCert
+ case i < chainLen-1:
+ timestampingAuthority.Intermediates = append(timestampingAuthority.Intermediates, parsedCert)
+ case i == chainLen-1:
+ timestampingAuthority.Root = parsedCert
+ }
+ }
+ validFor := certAuthority.GetValidFor()
+ if validFor != nil {
+ start := validFor.GetStart()
+ if start != nil {
+ timestampingAuthority.ValidityPeriodStart = start.AsTime()
+ }
+ end := validFor.GetEnd()
+ if end != nil {
+ timestampingAuthority.ValidityPeriodEnd = end.AsTime()
+ }
+ }
+
+ timestampingAuthority.URI = certAuthority.Uri
+
+ return timestampingAuthority, nil
+}
+
+func NewTrustedRootFromPath(path string) (*TrustedRoot, error) {
+ trustedrootJSON, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read trusted root %w",
+ err,
+ )
+ }
+
+ return NewTrustedRootFromJSON(trustedrootJSON)
+}
+
+// NewTrustedRootFromJSON returns the Sigstore trusted root.
+func NewTrustedRootFromJSON(rootJSON []byte) (*TrustedRoot, error) {
+ pbTrustedRoot, err := NewTrustedRootProtobuf(rootJSON)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewTrustedRootFromProtobuf(pbTrustedRoot)
+}
+
+// NewTrustedRootProtobuf returns the Sigstore trusted root as a protobuf.
+func NewTrustedRootProtobuf(rootJSON []byte) (*prototrustroot.TrustedRoot, error) {
+ pbTrustedRoot := &prototrustroot.TrustedRoot{}
+ err := protojson.Unmarshal(rootJSON, pbTrustedRoot)
+ if err != nil {
+ return nil, fmt.Errorf("failed to proto-json unmarshal trusted root: %w", err)
+ }
+ return pbTrustedRoot, nil
+}
+
+// NewTrustedRoot initializes a TrustedRoot object from a mediaType string, list of Fulcio
+// certificate authorities, list of timestamp authorities and maps of ctlogs and rekor
+// transparency log instances.
+// mediaType must be TrustedRootMediaType01 ("application/vnd.dev.sigstore.trustedroot+json;version=0.1").
+func NewTrustedRoot(mediaType string,
+ certificateAuthorities []CertificateAuthority,
+ certificateTransparencyLogs map[string]*TransparencyLog,
+ timestampAuthorities []TimestampingAuthority,
+ transparencyLogs map[string]*TransparencyLog) (*TrustedRoot, error) {
+ // Note: we assume one cert chain per target, with certificates already ordered from leaf to root.
+ if mediaType != TrustedRootMediaType01 {
+ return nil, fmt.Errorf("unsupported TrustedRoot media type: %s, must be %s", mediaType, TrustedRootMediaType01)
+ }
+ tr := &TrustedRoot{
+ certificateAuthorities: certificateAuthorities,
+ ctLogs: certificateTransparencyLogs,
+ timestampingAuthorities: timestampAuthorities,
+ rekorLogs: transparencyLogs,
+ }
+ return tr, nil
+}
+
+// FetchTrustedRoot fetches the Sigstore trusted root from TUF and returns it.
+func FetchTrustedRoot() (*TrustedRoot, error) {
+ return FetchTrustedRootWithOptions(tuf.DefaultOptions())
+}
+
+// FetchTrustedRootWithOptions fetches the trusted root from TUF with the given options and returns it.
+func FetchTrustedRootWithOptions(opts *tuf.Options) (*TrustedRoot, error) {
+ client, err := tuf.New(opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create TUF client %w", err)
+ }
+ return GetTrustedRoot(client)
+}
+
+// GetTrustedRoot returns the trusted root fetched via the given TUF client.
+func GetTrustedRoot(c *tuf.Client) (*TrustedRoot, error) {
+ jsonBytes, err := c.GetTarget(defaultTrustedRoot)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get trusted root from TUF client %w",
+ err,
+ )
+ }
+ return NewTrustedRootFromJSON(jsonBytes)
+}
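+
+// Example usage (illustrative sketch): fetching the trusted root for the
+// public good instance via TUF, with default options.
+//
+//	opts := tuf.DefaultOptions()
+//	tr, err := root.FetchTrustedRootWithOptions(opts)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = tr.FulcioCertificateAuthorities()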
+
+func getSignatureHashAlgo(pubKey crypto.PublicKey) crypto.Hash {
+ var h crypto.Hash
+ switch pk := pubKey.(type) {
+ case *rsa.PublicKey:
+ h = crypto.SHA256
+ case *ecdsa.PublicKey:
+ switch pk.Curve {
+ case elliptic.P256():
+ h = crypto.SHA256
+ case elliptic.P384():
+ h = crypto.SHA384
+ case elliptic.P521():
+ h = crypto.SHA512
+ default:
+ h = crypto.SHA256
+ }
+ case ed25519.PublicKey:
+ h = crypto.SHA512
+ default:
+ h = crypto.SHA256
+ }
+ return h
+}
+
+// LiveTrustedRoot is a wrapper around TrustedRoot that periodically
+// refreshes the trusted root from TUF. This is needed for long-running
+// processes to ensure that the trusted root does not expire.
+type LiveTrustedRoot struct {
+ *TrustedRoot
+ mu sync.RWMutex
+}
+
+// NewLiveTrustedRoot returns a LiveTrustedRoot that will periodically
+// refresh the trusted root from TUF.
+func NewLiveTrustedRoot(opts *tuf.Options) (*LiveTrustedRoot, error) {
+ return NewLiveTrustedRootFromTarget(opts, defaultTrustedRoot)
+}
+
+// NewLiveTrustedRootFromTarget returns a LiveTrustedRoot that will
+// periodically refresh the trusted root from TUF using the provided target.
+func NewLiveTrustedRootFromTarget(opts *tuf.Options, target string) (*LiveTrustedRoot, error) {
+ return NewLiveTrustedRootFromTargetWithPeriod(opts, target, 24*time.Hour)
+}
+
+// NewLiveTrustedRootFromTargetWithPeriod returns a LiveTrustedRoot that
+// performs a TUF refresh with the provided period, accessing the provided
+// target.
+func NewLiveTrustedRootFromTargetWithPeriod(opts *tuf.Options, target string, rfPeriod time.Duration) (*LiveTrustedRoot, error) {
+ client, err := tuf.New(opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create TUF client %w", err)
+ }
+
+ b, err := client.GetTarget(target)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get target from TUF client %w", err)
+ }
+
+ tr, err := NewTrustedRootFromJSON(b)
+ if err != nil {
+ return nil, err
+ }
+ ltr := &LiveTrustedRoot{
+ TrustedRoot: tr,
+ mu: sync.RWMutex{},
+ }
+
+ ticker := time.NewTicker(rfPeriod)
+ go func() {
+ for range ticker.C {
+ client, err = tuf.New(opts)
+ if err != nil {
+ log.Printf("error creating TUF client: %v", err)
+ // Skip this cycle; using a nil client below would panic.
+ continue
+ }
+
+ b, err := client.GetTarget(target)
+ if err != nil {
+ log.Printf("error fetching trusted root: %v", err)
+ continue
+ }
+
+ newTr, err := NewTrustedRootFromJSON(b)
+ if err != nil {
+ log.Printf("error parsing trusted root: %v", err)
+ continue
+ }
+ ltr.mu.Lock()
+ ltr.TrustedRoot = newTr
+ ltr.mu.Unlock()
+ log.Printf("successfully refreshed the TUF root")
+ }
+ }()
+ return ltr, nil
+}
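+
+// Example usage (illustrative sketch): a long-running verifier that
+// refreshes the trusted root every 12 hours. The target name and period
+// are example values.
+//
+//	ltr, err := root.NewLiveTrustedRootFromTargetWithPeriod(
+//		tuf.DefaultOptions(), "trusted_root.json", 12*time.Hour)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = ltr.RekorLogs() // reads go through the mutex-guarded accessors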
+
+func (l *LiveTrustedRoot) TimestampingAuthorities() []TimestampingAuthority {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+ return l.TrustedRoot.TimestampingAuthorities()
+}
+
+func (l *LiveTrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+ return l.TrustedRoot.FulcioCertificateAuthorities()
+}
+
+func (l *LiveTrustedRoot) RekorLogs() map[string]*TransparencyLog {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+ return l.TrustedRoot.RekorLogs()
+}
+
+func (l *LiveTrustedRoot) CTLogs() map[string]*TransparencyLog {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+ return l.TrustedRoot.CTLogs()
+}
+
+func (l *LiveTrustedRoot) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+ return l.TrustedRoot.PublicKeyVerifier(keyID)
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go
new file mode 100644
index 00000000000..78ec96e3a8c
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/root/trusted_root_create.go
@@ -0,0 +1,270 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/x509"
+ "fmt"
+ "sort"
+ "time"
+
+ protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+func (tr *TrustedRoot) constructProtoTrustRoot() error {
+ tr.trustedRoot = &prototrustroot.TrustedRoot{}
+ tr.trustedRoot.MediaType = TrustedRootMediaType01
+
+ for logID, transparencyLog := range tr.rekorLogs {
+ tlProto, err := transparencyLogToProtobufTL(transparencyLog)
+ if err != nil {
+ return fmt.Errorf("failed converting rekor log %s to protobuf: %w", logID, err)
+ }
+ tr.trustedRoot.Tlogs = append(tr.trustedRoot.Tlogs, tlProto)
+ }
+ // ensure stable sorting of the slice
+ sortTlogSlice(tr.trustedRoot.Tlogs)
+
+ for logID, ctLog := range tr.ctLogs {
+ ctProto, err := transparencyLogToProtobufTL(ctLog)
+ if err != nil {
+ return fmt.Errorf("failed converting ctlog %s to protobuf: %w", logID, err)
+ }
+ tr.trustedRoot.Ctlogs = append(tr.trustedRoot.Ctlogs, ctProto)
+ }
+ // ensure stable sorting of the slice
+ sortTlogSlice(tr.trustedRoot.Ctlogs)
+
+ for _, ca := range tr.certificateAuthorities {
+ caProto, err := certificateAuthorityToProtobufCA(ca.(*FulcioCertificateAuthority))
+ if err != nil {
+ return fmt.Errorf("failed converting fulcio cert chain to protobuf: %w", err)
+ }
+ tr.trustedRoot.CertificateAuthorities = append(tr.trustedRoot.CertificateAuthorities, caProto)
+ }
+ // ensure stable sorting of the slice
+ sortCASlice(tr.trustedRoot.CertificateAuthorities)
+
+ for _, ca := range tr.timestampingAuthorities {
+ caProto, err := timestampingAuthorityToProtobufCA(ca.(*SigstoreTimestampingAuthority))
+ if err != nil {
+ return fmt.Errorf("failed converting TSA cert chain to protobuf: %w", err)
+ }
+ tr.trustedRoot.TimestampAuthorities = append(tr.trustedRoot.TimestampAuthorities, caProto)
+ }
+ // ensure stable sorting of the slice
+ sortCASlice(tr.trustedRoot.TimestampAuthorities)
+
+ return nil
+}
+
+func sortCASlice(slc []*prototrustroot.CertificateAuthority) {
+ sort.Slice(slc, func(i, j int) bool {
+ iTime := time.Unix(0, 0)
+ jTime := time.Unix(0, 0)
+
+ if slc[i].ValidFor.Start != nil {
+ iTime = slc[i].ValidFor.Start.AsTime()
+ }
+ if slc[j].ValidFor.Start != nil {
+ jTime = slc[j].ValidFor.Start.AsTime()
+ }
+
+ return iTime.Before(jTime)
+ })
+}
+
+func sortTlogSlice(slc []*prototrustroot.TransparencyLogInstance) {
+ sort.Slice(slc, func(i, j int) bool {
+ iTime := time.Unix(0, 0)
+ jTime := time.Unix(0, 0)
+
+ if slc[i].PublicKey.ValidFor.Start != nil {
+ iTime = slc[i].PublicKey.ValidFor.Start.AsTime()
+ }
+ if slc[j].PublicKey.ValidFor.Start != nil {
+ jTime = slc[j].PublicKey.ValidFor.Start.AsTime()
+ }
+
+ return iTime.Before(jTime)
+ })
+}
+
+func certificateAuthorityToProtobufCA(ca *FulcioCertificateAuthority) (*prototrustroot.CertificateAuthority, error) {
+ // Guard against a nil root before it is dereferenced below.
+ if ca.Root == nil {
+ return nil, fmt.Errorf("root certificate is nil")
+ }
+ org := ""
+ if len(ca.Root.Subject.Organization) > 0 {
+ org = ca.Root.Subject.Organization[0]
+ }
+ var allCerts []*protocommon.X509Certificate
+ for _, intermed := range ca.Intermediates {
+ allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw})
+ }
+ allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw})
+
+ caProto := prototrustroot.CertificateAuthority{
+ Uri: ca.URI,
+ Subject: &protocommon.DistinguishedName{
+ Organization: org,
+ CommonName: ca.Root.Subject.CommonName,
+ },
+ ValidFor: &protocommon.TimeRange{
+ Start: timestamppb.New(ca.ValidityPeriodStart),
+ },
+ CertChain: &protocommon.X509CertificateChain{
+ Certificates: allCerts,
+ },
+ }
+
+ if !ca.ValidityPeriodEnd.IsZero() {
+ caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd)
+ }
+
+ return &caProto, nil
+}
+
+func timestampingAuthorityToProtobufCA(ca *SigstoreTimestampingAuthority) (*prototrustroot.CertificateAuthority, error) {
+ // Guard against a nil root before it is dereferenced below.
+ if ca.Root == nil {
+ return nil, fmt.Errorf("root certificate is nil")
+ }
+ org := ""
+ if len(ca.Root.Subject.Organization) > 0 {
+ org = ca.Root.Subject.Organization[0]
+ }
+ var allCerts []*protocommon.X509Certificate
+ if ca.Leaf != nil {
+ allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Leaf.Raw})
+ }
+ for _, intermed := range ca.Intermediates {
+ allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw})
+ }
+ allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw})
+
+ caProto := prototrustroot.CertificateAuthority{
+ Uri: ca.URI,
+ Subject: &protocommon.DistinguishedName{
+ Organization: org,
+ CommonName: ca.Root.Subject.CommonName,
+ },
+ ValidFor: &protocommon.TimeRange{
+ Start: timestamppb.New(ca.ValidityPeriodStart),
+ },
+ CertChain: &protocommon.X509CertificateChain{
+ Certificates: allCerts,
+ },
+ }
+
+ if !ca.ValidityPeriodEnd.IsZero() {
+ caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd)
+ }
+
+ return &caProto, nil
+}
+
+func transparencyLogToProtobufTL(tl *TransparencyLog) (*prototrustroot.TransparencyLogInstance, error) {
+ hashAlgo, err := hashAlgorithmToProtobufHashAlgorithm(tl.HashFunc)
+ if err != nil {
+ return nil, fmt.Errorf("failed converting hash algorithm to protobuf: %w", err)
+ }
+ publicKey, err := publicKeyToProtobufPublicKey(tl.PublicKey, tl.ValidityPeriodStart, tl.ValidityPeriodEnd)
+ if err != nil {
+ return nil, fmt.Errorf("failed converting public key to protobuf: %w", err)
+ }
+ trProto := prototrustroot.TransparencyLogInstance{
+ BaseUrl: tl.BaseURL,
+ HashAlgorithm: hashAlgo,
+ PublicKey: publicKey,
+ LogId: &protocommon.LogId{
+ KeyId: tl.ID,
+ },
+ }
+
+ return &trProto, nil
+}
+
+func hashAlgorithmToProtobufHashAlgorithm(hashAlgorithm crypto.Hash) (protocommon.HashAlgorithm, error) {
+ switch hashAlgorithm {
+ case crypto.SHA256:
+ return protocommon.HashAlgorithm_SHA2_256, nil
+ case crypto.SHA384:
+ return protocommon.HashAlgorithm_SHA2_384, nil
+ case crypto.SHA512:
+ return protocommon.HashAlgorithm_SHA2_512, nil
+ case crypto.SHA3_256:
+ return protocommon.HashAlgorithm_SHA3_256, nil
+ case crypto.SHA3_384:
+ return protocommon.HashAlgorithm_SHA3_384, nil
+ default:
+ return 0, fmt.Errorf("unsupported hash algorithm for Merkle tree: %v", hashAlgorithm)
+ }
+}
+
+func publicKeyToProtobufPublicKey(publicKey crypto.PublicKey, start time.Time, end time.Time) (*protocommon.PublicKey, error) {
+ pkd := protocommon.PublicKey{
+ ValidFor: &protocommon.TimeRange{
+ Start: timestamppb.New(start),
+ },
+ }
+
+ if !end.IsZero() {
+ pkd.ValidFor.End = timestamppb.New(end)
+ }
+
+ rawBytes, err := x509.MarshalPKIXPublicKey(publicKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed marshalling public key: %w", err)
+ }
+ pkd.RawBytes = rawBytes
+
+ switch p := publicKey.(type) {
+ case *ecdsa.PublicKey:
+ switch p.Curve {
+ case elliptic.P256():
+ pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256
+ case elliptic.P384():
+ pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384
+ case elliptic.P521():
+ pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512
+ default:
+ return nil, fmt.Errorf("unsupported curve for ecdsa key: %T", p.Curve)
+ }
+ case *rsa.PublicKey:
+ switch p.Size() * 8 {
+ case 2048:
+ pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256
+ case 3072:
+ pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256
+ case 4096:
+ pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256
+ default:
+ return nil, fmt.Errorf("unsupported public modulus for RSA key: %d", p.Size())
+ }
+ case ed25519.PublicKey:
+ pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ED25519
+ default:
+ return nil, fmt.Errorf("unknown public key type: %T", p)
+ }
+
+ return &pkd, nil
+}
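+
+// Illustrative sketch of the mapping above: an ECDSA P-256 public key is
+// encoded with key details PKIX_ECDSA_P256_SHA_256 (key generation here is
+// for demonstration only).
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	pb, _ := publicKeyToProtobufPublicKey(&priv.PublicKey, time.Now(), time.Time{})
+//	// pb.KeyDetails == protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256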
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go
new file mode 100644
index 00000000000..ff8bec4469a
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tlog/entry.go
@@ -0,0 +1,529 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlog
+
+import (
+ "bytes"
+ "context"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag/conv"
+ protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
+ rekortilespb "github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf"
+ "github.com/sigstore/rekor-tiles/v2/pkg/note"
+ typesverifier "github.com/sigstore/rekor-tiles/v2/pkg/types/verifier"
+ "github.com/sigstore/rekor-tiles/v2/pkg/verify"
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sigstore/rekor/pkg/types"
+ dsse_v001 "github.com/sigstore/rekor/pkg/types/dsse/v0.0.1"
+ hashedrekord_v001 "github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1"
+ intoto_v002 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.2"
+ rekorVerify "github.com/sigstore/rekor/pkg/verify"
+ "github.com/sigstore/sigstore/pkg/signature"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ "github.com/sigstore/sigstore-go/pkg/root"
+)
+
+type Entry struct {
+ kind string
+ version string
+ rekorV1Entry types.EntryImpl
+ rekorV2Entry *rekortilespb.Entry
+ signedEntryTimestamp []byte
+ tle *v1.TransparencyLogEntry
+}
+
+type RekorPayload struct {
+ Body interface{} `json:"body"`
+ IntegratedTime int64 `json:"integratedTime"`
+ LogIndex int64 `json:"logIndex"`
+ LogID string `json:"logID"` //nolint:tagliatelle
+}
+
+var ErrNilValue = errors.New("validation error: nil value in transparency log entry")
+var ErrInvalidRekorV2Entry = errors.New("type error: object is not a Rekor v2 type, try parsing as Rekor v1")
+
+// Deprecated: use NewTlogEntry. NewEntry only parses a Rekor v1 entry.
+func NewEntry(body []byte, integratedTime int64, logIndex int64, logID []byte, signedEntryTimestamp []byte, inclusionProof *models.InclusionProof) (*Entry, error) {
+ pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer())
+ if err != nil {
+ return nil, err
+ }
+ rekorEntry, err := types.UnmarshalEntry(pe)
+ if err != nil {
+ return nil, err
+ }
+
+ entry := &Entry{
+ rekorV1Entry: rekorEntry,
+ tle: &v1.TransparencyLogEntry{
+ LogIndex: logIndex,
+ LogId: &protocommon.LogId{
+ KeyId: logID,
+ },
+ IntegratedTime: integratedTime,
+ CanonicalizedBody: body,
+ },
+ kind: pe.Kind(),
+ version: rekorEntry.APIVersion(),
+ }
+
+ if len(signedEntryTimestamp) > 0 {
+ entry.signedEntryTimestamp = signedEntryTimestamp
+ }
+
+ if inclusionProof != nil {
+ hashes := make([][]byte, len(inclusionProof.Hashes))
+ for i, s := range inclusionProof.Hashes {
+ hashes[i], err = hex.DecodeString(s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ rootHashDec, err := hex.DecodeString(*inclusionProof.RootHash)
+ if err != nil {
+ return nil, err
+ }
+ entry.tle.InclusionProof = &v1.InclusionProof{
+ LogIndex: logIndex,
+ RootHash: rootHashDec,
+ TreeSize: *inclusionProof.TreeSize,
+ Hashes: hashes,
+ Checkpoint: &v1.Checkpoint{
+ Envelope: *inclusionProof.Checkpoint,
+ },
+ }
+ }
+
+ return entry, nil
+}
+
+func NewTlogEntry(tle *v1.TransparencyLogEntry) (*Entry, error) {
+ var rekorV2Entry *rekortilespb.Entry
+ var rekorV1Entry types.EntryImpl
+ var err error
+
+ body := tle.CanonicalizedBody
+ rekorV2Entry, err = unmarshalRekorV2Entry(body)
+ if err != nil {
+ rekorV1Entry, err = unmarshalRekorV1Entry(body)
+ if err != nil {
+ return nil, fmt.Errorf("entry body is not a recognizable Rekor v1 or Rekor v2 type: %w", err)
+ }
+ }
+
+ entry := &Entry{
+ rekorV1Entry: rekorV1Entry,
+ rekorV2Entry: rekorV2Entry,
+ kind: tle.KindVersion.Kind,
+ version: tle.KindVersion.Version,
+ }
+
+ signedEntryTimestamp := []byte{}
+ if tle.InclusionPromise != nil && tle.InclusionPromise.SignedEntryTimestamp != nil {
+ signedEntryTimestamp = tle.InclusionPromise.SignedEntryTimestamp
+ }
+ if len(signedEntryTimestamp) > 0 {
+ entry.signedEntryTimestamp = signedEntryTimestamp
+ }
+ entry.tle = tle
+
+ return entry, nil
+}
+
+func ParseTransparencyLogEntry(tle *v1.TransparencyLogEntry) (*Entry, error) {
+ if tle == nil {
+ return nil, ErrNilValue
+ }
+ if tle.CanonicalizedBody == nil ||
+ tle.LogIndex < 0 ||
+ tle.LogId == nil ||
+ tle.LogId.KeyId == nil ||
+ tle.KindVersion == nil {
+ return nil, ErrNilValue
+ }
+
+ if tle.InclusionProof != nil {
+ if tle.InclusionProof.Checkpoint == nil {
+ return nil, fmt.Errorf("inclusion proof missing required checkpoint")
+ }
+ if tle.InclusionProof.Checkpoint.Envelope == "" {
+ return nil, fmt.Errorf("inclusion proof checkpoint empty")
+ }
+ }
+
+ entry, err := NewTlogEntry(tle)
+ if err != nil {
+ return nil, err
+ }
+ if entry.kind != tle.KindVersion.Kind || entry.version != tle.KindVersion.Version {
+ return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, tle.KindVersion.Kind, tle.KindVersion.Version)
+ }
+ return entry, nil
+}
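+
+// Example usage (illustrative sketch): parsing a protobuf transparency log
+// entry from a bundle and validating its body.
+//
+//	entry, err := tlog.ParseTransparencyLogEntry(tle)
+//	if err != nil {
+//		return err
+//	}
+//	if err := tlog.ValidateEntry(entry); err != nil {
+//		return err
+//	}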
+
+// Deprecated: use ParseTransparencyLogEntry. ParseEntry only parses Rekor v1 type entries.
+// ParseEntry decodes the entry bytes to a specific entry type (types.EntryImpl).
+func ParseEntry(protoEntry *v1.TransparencyLogEntry) (entry *Entry, err error) {
+ if protoEntry == nil ||
+ protoEntry.CanonicalizedBody == nil ||
+ protoEntry.IntegratedTime == 0 ||
+ protoEntry.LogIndex < 0 ||
+ protoEntry.LogId == nil ||
+ protoEntry.LogId.KeyId == nil ||
+ protoEntry.KindVersion == nil {
+ return nil, ErrNilValue
+ }
+
+ signedEntryTimestamp := []byte{}
+ if protoEntry.InclusionPromise != nil && protoEntry.InclusionPromise.SignedEntryTimestamp != nil {
+ signedEntryTimestamp = protoEntry.InclusionPromise.SignedEntryTimestamp
+ }
+
+ var inclusionProof *models.InclusionProof
+
+ if protoEntry.InclusionProof != nil {
+ var hashes []string
+
+ for _, v := range protoEntry.InclusionProof.Hashes {
+ hashes = append(hashes, hex.EncodeToString(v))
+ }
+
+ rootHash := hex.EncodeToString(protoEntry.InclusionProof.RootHash)
+
+ if protoEntry.InclusionProof.Checkpoint == nil {
+ return nil, fmt.Errorf("inclusion proof missing required checkpoint")
+ }
+ if protoEntry.InclusionProof.Checkpoint.Envelope == "" {
+ return nil, fmt.Errorf("inclusion proof checkpoint empty")
+ }
+
+ inclusionProof = &models.InclusionProof{
+ LogIndex: conv.Pointer(protoEntry.InclusionProof.LogIndex),
+ RootHash: &rootHash,
+ TreeSize: conv.Pointer(protoEntry.InclusionProof.TreeSize),
+ Hashes: hashes,
+ Checkpoint: conv.Pointer(protoEntry.InclusionProof.Checkpoint.Envelope),
+ }
+ }
+
+ entry, err = NewEntry(protoEntry.CanonicalizedBody, protoEntry.IntegratedTime, protoEntry.LogIndex, protoEntry.LogId.KeyId, signedEntryTimestamp, inclusionProof)
+ if err != nil {
+ return nil, err
+ }
+
+ if entry.kind != protoEntry.KindVersion.Kind || entry.version != protoEntry.KindVersion.Version {
+ return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, protoEntry.KindVersion.Kind, protoEntry.KindVersion.Version)
+ }
+ entry.tle = protoEntry
+
+ return entry, nil
+}
+
+func ValidateEntry(entry *Entry) error {
+ if entry.rekorV1Entry != nil {
+ switch e := entry.rekorV1Entry.(type) {
+ case *dsse_v001.V001Entry:
+ err := e.DSSEObj.Validate(strfmt.Default)
+ if err != nil {
+ return err
+ }
+ case *hashedrekord_v001.V001Entry:
+ err := e.HashedRekordObj.Validate(strfmt.Default)
+ if err != nil {
+ return err
+ }
+ case *intoto_v002.V002Entry:
+ err := e.IntotoObj.Validate(strfmt.Default)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unsupported entry type: %T", e)
+ }
+ }
+ if entry.rekorV2Entry != nil {
+ switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) {
+ case *rekortilespb.Spec_HashedRekordV002:
+ err := validateHashedRekordV002Entry(e.HashedRekordV002)
+ if err != nil {
+ return err
+ }
+ case *rekortilespb.Spec_DsseV002:
+ err := validateDSSEV002Entry(e.DsseV002)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func validateHashedRekordV002Entry(hr *rekortilespb.HashedRekordLogEntryV002) error {
+ if hr.GetSignature() == nil || len(hr.GetSignature().GetContent()) == 0 {
+ return fmt.Errorf("missing signature")
+ }
+ if hr.GetSignature().GetVerifier() == nil {
+ return fmt.Errorf("missing verifier")
+ }
+ if hr.GetData() == nil {
+ return fmt.Errorf("missing digest")
+ }
+ return typesverifier.Validate(hr.GetSignature().GetVerifier())
+}
+
+func validateDSSEV002Entry(d *rekortilespb.DSSELogEntryV002) error {
+ if d.GetPayloadHash() == nil {
+ return fmt.Errorf("missing payload")
+ }
+ if len(d.GetSignatures()) == 0 {
+ return fmt.Errorf("missing signatures")
+ }
+ return typesverifier.Validate(d.GetSignatures()[0].GetVerifier())
+}
+
+func (entry *Entry) IntegratedTime() time.Time {
+ if entry.tle.IntegratedTime == 0 {
+ return time.Time{}
+ }
+ return time.Unix(entry.tle.IntegratedTime, 0)
+}
+
+func (entry *Entry) Signature() []byte {
+ if entry.rekorV1Entry != nil {
+ switch e := entry.rekorV1Entry.(type) {
+ case *dsse_v001.V001Entry:
+ sigBytes, err := base64.StdEncoding.DecodeString(*e.DSSEObj.Signatures[0].Signature)
+ if err != nil {
+ return []byte{}
+ }
+ return sigBytes
+ case *hashedrekord_v001.V001Entry:
+ return e.HashedRekordObj.Signature.Content
+ case *intoto_v002.V002Entry:
+ sigBytes, err := base64.StdEncoding.DecodeString(string(*e.IntotoObj.Content.Envelope.Signatures[0].Sig))
+ if err != nil {
+ return []byte{}
+ }
+ return sigBytes
+ }
+ }
+ if entry.rekorV2Entry != nil {
+ switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) {
+ case *rekortilespb.Spec_HashedRekordV002:
+ return e.HashedRekordV002.GetSignature().GetContent()
+ case *rekortilespb.Spec_DsseV002:
+ return e.DsseV002.GetSignatures()[0].GetContent()
+ }
+ }
+
+ return []byte{}
+}
+
+func (entry *Entry) PublicKey() any {
+ var pk any
+ var certBytes []byte
+
+ if entry.rekorV1Entry != nil {
+ var pemString []byte
+ switch e := entry.rekorV1Entry.(type) {
+ case *dsse_v001.V001Entry:
+ pemString = []byte(*e.DSSEObj.Signatures[0].Verifier)
+ case *hashedrekord_v001.V001Entry:
+ pemString = []byte(e.HashedRekordObj.Signature.PublicKey.Content)
+ case *intoto_v002.V002Entry:
+ pemString = []byte(*e.IntotoObj.Content.Envelope.Signatures[0].PublicKey)
+ }
+ certBlock, _ := pem.Decode(pemString)
+ // pem.Decode returns nil if no PEM data is found; avoid a nil dereference.
+ if certBlock == nil {
+ return nil
+ }
+ certBytes = certBlock.Bytes
+ } else if entry.rekorV2Entry != nil {
+ var verifier *rekortilespb.Verifier
+ switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) {
+ case *rekortilespb.Spec_HashedRekordV002:
+ verifier = e.HashedRekordV002.GetSignature().GetVerifier()
+ case *rekortilespb.Spec_DsseV002:
+ verifier = e.DsseV002.GetSignatures()[0].GetVerifier()
+ }
+ switch verifier.Verifier.(type) {
+ case *rekortilespb.Verifier_PublicKey:
+ certBytes = verifier.GetPublicKey().GetRawBytes()
+ case *rekortilespb.Verifier_X509Certificate:
+ certBytes = verifier.GetX509Certificate().GetRawBytes()
+ }
+ }
+
+ var err error
+
+ pk, err = x509.ParseCertificate(certBytes)
+ if err != nil {
+ pk, err = x509.ParsePKIXPublicKey(certBytes)
+ if err != nil {
+ return nil
+ }
+ }
+
+ return pk
+}
+
+func (entry *Entry) LogKeyID() string {
+ return string(entry.tle.GetLogId().GetKeyId())
+}
+
+func (entry *Entry) LogIndex() int64 {
+ return entry.tle.GetLogIndex()
+}
+
+func (entry *Entry) Body() any {
+ return base64.StdEncoding.EncodeToString(entry.tle.CanonicalizedBody)
+}
+
+func (entry *Entry) HasInclusionPromise() bool {
+ return entry.signedEntryTimestamp != nil
+}
+
+func (entry *Entry) HasInclusionProof() bool {
+ return entry.tle.InclusionProof != nil
+}
+
+func (entry *Entry) TransparencyLogEntry() *v1.TransparencyLogEntry {
+ return entry.tle
+}
+
+// VerifyInclusion verifies a Rekor v1-style checkpoint and the entry's inclusion in the Rekor v1 log.
+func VerifyInclusion(entry *Entry, verifier signature.Verifier) error {
+ hashes := make([]string, len(entry.tle.InclusionProof.Hashes))
+ for i, b := range entry.tle.InclusionProof.Hashes {
+ hashes[i] = hex.EncodeToString(b)
+ }
+ rootHash := hex.EncodeToString(entry.tle.GetInclusionProof().GetRootHash())
+ logEntry := models.LogEntryAnon{
+ IntegratedTime: &entry.tle.IntegratedTime,
+ LogID: conv.Pointer(string(entry.tle.GetLogId().KeyId)),
+ LogIndex: conv.Pointer(entry.tle.GetInclusionProof().GetLogIndex()),
+ Body: base64.StdEncoding.EncodeToString(entry.tle.GetCanonicalizedBody()),
+ Verification: &models.LogEntryAnonVerification{
+ InclusionProof: &models.InclusionProof{
+ Checkpoint: conv.Pointer(entry.tle.GetInclusionProof().GetCheckpoint().GetEnvelope()),
+ Hashes: hashes,
+ LogIndex: conv.Pointer(entry.tle.GetInclusionProof().GetLogIndex()),
+ RootHash: &rootHash,
+ TreeSize: conv.Pointer(entry.tle.GetInclusionProof().GetTreeSize()),
+ },
+ SignedEntryTimestamp: strfmt.Base64(entry.signedEntryTimestamp),
+ },
+ }
+ err := rekorVerify.VerifyInclusion(context.Background(), &logEntry)
+ if err != nil {
+ return err
+ }
+
+ err = rekorVerify.VerifyCheckpointSignature(&logEntry, verifier)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// VerifyCheckpointAndInclusion verifies a checkpoint and the entry's inclusion in the transparency log.
+// This function is compatible with Rekor v1 and Rekor v2.
+func VerifyCheckpointAndInclusion(entry *Entry, verifier signature.Verifier, origin string) error {
+ noteVerifier, err := note.NewNoteVerifier(origin, verifier)
+ if err != nil {
+ return fmt.Errorf("loading note verifier: %w", err)
+ }
+ err = verify.VerifyLogEntry(entry.TransparencyLogEntry(), noteVerifier)
+ if err != nil {
+ return fmt.Errorf("verifying log entry: %w", err)
+ }
+
+ return nil
+}
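+
+// Example usage (illustrative sketch): the verifier and origin are assumed
+// to come from the matching log instance in the trusted root.
+//
+//	if err := tlog.VerifyCheckpointAndInclusion(entry, logVerifier, logOrigin); err != nil {
+//		return fmt.Errorf("inclusion verification failed: %w", err)
+//	}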
+
+func VerifySET(entry *Entry, verifiers map[string]*root.TransparencyLog) error {
+ if entry.rekorV1Entry == nil {
+ return fmt.Errorf("can only verify SET for Rekor v1 entry")
+ }
+ rekorPayload := RekorPayload{
+ Body: entry.Body(),
+ IntegratedTime: entry.tle.IntegratedTime,
+ LogIndex: entry.LogIndex(),
+ LogID: hex.EncodeToString([]byte(entry.LogKeyID())),
+ }
+
+ verifier, ok := verifiers[hex.EncodeToString([]byte(entry.LogKeyID()))]
+ if !ok {
+ return errors.New("rekor log public key not found for payload")
+ }
+ if verifier.ValidityPeriodStart.IsZero() {
+ return errors.New("rekor validity period start time not set")
+ }
+ if (verifier.ValidityPeriodStart.After(entry.IntegratedTime())) ||
+ (!verifier.ValidityPeriodEnd.IsZero() && verifier.ValidityPeriodEnd.Before(entry.IntegratedTime())) {
+ return errors.New("rekor log public key not valid at payload integrated time")
+ }
+
+ contents, err := json.Marshal(rekorPayload)
+ if err != nil {
+ return fmt.Errorf("marshaling: %w", err)
+ }
+ canonicalized, err := jsoncanonicalizer.Transform(contents)
+ if err != nil {
+ return fmt.Errorf("canonicalizing: %w", err)
+ }
+
+ hash := sha256.Sum256(canonicalized)
+ if ecdsaPublicKey, ok := verifier.PublicKey.(*ecdsa.PublicKey); !ok {
+ return fmt.Errorf("unsupported public key type: %T", verifier.PublicKey)
+ } else if !ecdsa.VerifyASN1(ecdsaPublicKey, hash[:], entry.signedEntryTimestamp) {
+ return errors.New("unable to verify SET")
+ }
+ return nil
+}
+
+func unmarshalRekorV1Entry(body []byte) (types.EntryImpl, error) {
+ pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer())
+ if err != nil {
+ return nil, err
+ }
+ rekorEntry, err := types.UnmarshalEntry(pe)
+ if err != nil {
+ return nil, err
+ }
+ return rekorEntry, nil
+}
+
+func unmarshalRekorV2Entry(body []byte) (*rekortilespb.Entry, error) {
+ logEntryBody := rekortilespb.Entry{}
+ err := protojson.Unmarshal(body, &logEntryBody)
+ if err != nil {
+ return nil, ErrInvalidRekorV2Entry
+ }
+ return &logEntryBody, nil
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go
new file mode 100644
index 00000000000..e2e76d69074
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/client.go
@@ -0,0 +1,211 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tuf
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/theupdateframework/go-tuf/v2/metadata/config"
+ "github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
+ "github.com/theupdateframework/go-tuf/v2/metadata/updater"
+
+ "github.com/sigstore/sigstore-go/pkg/util"
+)
+
+// Client is a Sigstore TUF client
+type Client struct {
+ cfg *config.UpdaterConfig
+ up *updater.Updater
+ opts *Options
+}
+
+// New returns a new client with custom options
+func New(opts *Options) (*Client, error) {
+ var c = Client{
+ opts: opts,
+ }
+ dir := filepath.Join(opts.CachePath, URLToPath(opts.RepositoryBaseURL))
+ var err error
+
+ if c.cfg, err = config.New(opts.RepositoryBaseURL, opts.Root); err != nil {
+ return nil, fmt.Errorf("failed to create TUF client: %w", err)
+ }
+
+ c.cfg.LocalMetadataDir = dir
+ c.cfg.LocalTargetsDir = filepath.Join(dir, "targets")
+ c.cfg.DisableLocalCache = c.opts.DisableLocalCache
+ c.cfg.PrefixTargetsWithHash = !c.opts.DisableConsistentSnapshot
+
+ if c.cfg.DisableLocalCache {
+ c.opts.CachePath = ""
+ c.opts.CacheValidity = 0
+ c.opts.ForceCache = false
+ }
+
+ if opts.Fetcher != nil {
+ c.cfg.Fetcher = opts.Fetcher
+ } else {
+ fetcher := fetcher.NewDefaultFetcher()
+ fetcher.SetHTTPUserAgent(util.ConstructUserAgent())
+ c.cfg.Fetcher = fetcher
+ }
+
+ // Upon client creation, we may not perform a full TUF update,
+ // based on the cache control configuration. Start with a local
+ // client (only reads content on disk) and then decide if we
+ // must perform a full TUF update.
+ tmpCfg := *c.cfg
+ // Create a temporary config for the first use where UnsafeLocalMode
+ // is true. This means that when we first initialize the client,
+ // we are guaranteed to only read the metadata on disk.
+ // Based on that metadata we decide whether a full TUF
+ // refresh should be done or not. As such, the tmpCfg is only needed
+ // here and not in future invocations.
+ tmpCfg.UnsafeLocalMode = true
+ c.up, err = updater.New(&tmpCfg)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create initial TUF updater: %w", err)
+ }
+ if err = c.loadMetadata(); err != nil {
+ return nil, fmt.Errorf("failed to load metadata: %w", err)
+ }
+
+ return &c, nil
+}
+
+// DefaultClient returns a Sigstore TUF client for the public good instance
+func DefaultClient() (*Client, error) {
+ opts := DefaultOptions()
+
+ return New(opts)
+}
+
+// loadMetadata controls whether the client should actually perform a TUF refresh.
+// The TUF specification mandates a refresh, but for certain Sigstore clients it
+// may be beneficial to rely on the cache, and in air-gapped deployments a
+// refresh may not even be possible.
+func (c *Client) loadMetadata() error {
+ // Load the metadata into memory and verify it
+ if err := c.up.Refresh(); err != nil {
+ // this is most likely due to the lack of metadata files
+ // on disk. Perform a full update and return.
+ return c.Refresh()
+ }
+
+ if c.opts.ForceCache {
+ return nil
+ } else if c.opts.CacheValidity > 0 {
+ cfg, err := LoadConfig(c.configPath())
+ if err != nil {
+ // The config may not exist; don't error,
+ // just create a new empty config.
+ cfg = &Config{}
+ }
+
+ cacheValidUntil := cfg.LastTimestamp.AddDate(0, 0, c.opts.CacheValidity)
+ if time.Now().Before(cacheValidUntil) {
+ // No need to update
+ return nil
+ }
+ }
+
+ return c.Refresh()
+}
+
+func (c *Client) configPath() string {
+ var p = filepath.Join(
+ c.opts.CachePath,
+ fmt.Sprintf("%s.json", URLToPath(c.opts.RepositoryBaseURL)),
+ )
+
+ return p
+}
+
+// Refresh forces a refresh of the underlying TUF client.
+// As the tuf client updater does not support multiple refreshes during
+// its lifetime, this will replace the TUF client updater with a new one.
+func (c *Client) Refresh() error {
+ var err error
+
+ c.up, err = updater.New(c.cfg)
+ if err != nil {
+ return fmt.Errorf("failed to create tuf updater: %w", err)
+ }
+ err = c.up.Refresh()
+ if err != nil {
+ return fmt.Errorf("tuf refresh failed: %w", err)
+ }
+ // If cache is disabled, we don't need to persist the last timestamp
+ if c.cfg.DisableLocalCache {
+ return nil
+ }
+ // Update config with last update
+ cfg, err := LoadConfig(c.configPath())
+ if err != nil {
+ // The config file likely did not exist; create it
+ cfg = &Config{}
+ }
+ cfg.LastTimestamp = time.Now()
+ // Ignore any error writing the updated config file
+ _ = cfg.Persist(c.configPath())
+
+ return nil
+}
+
+// GetTarget returns a target file from the TUF repository
+func (c *Client) GetTarget(target string) ([]byte, error) {
+ // Set filepath to the empty string. When we get targets,
+ // we rely on the target info struct instead.
+ const filePath = ""
+ ti, err := c.up.GetTargetInfo(target)
+ if err != nil {
+ return nil, fmt.Errorf("getting info for target \"%s\": %w", target, err)
+ }
+
+ path, tb, err := c.up.FindCachedTarget(ti, filePath)
+ if err != nil {
+ return nil, fmt.Errorf("getting target cache: %w", err)
+ }
+ if path != "" {
+ // Cached version found
+ return tb, nil
+ }
+
+ // Download of target is needed
+ // Ignore targetsBaseURL, set to empty string
+ const targetsBaseURL = ""
+ _, tb, err = c.up.DownloadTarget(ti, filePath, targetsBaseURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to download target file %s - %w", target, err)
+ }
+
+ return tb, nil
+}
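+
+// Example usage (illustrative sketch; the target name is a placeholder):
+//
+//	c, err := tuf.DefaultClient()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	b, err := c.GetTarget("trusted_root.json")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = b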
+
+// URLToPath converts a URL to a filename-compatible string
+func URLToPath(url string) string {
+ // Strip scheme, replace slashes with dashes
+ // e.g. https://github.github.com/prod-tuf-root -> github.github.com-prod-tuf-root
+ fn := url
+ fn, _ = strings.CutPrefix(fn, "https://")
+ fn, _ = strings.CutPrefix(fn, "http://")
+ fn = strings.ReplaceAll(fn, "/", "-")
+ fn = strings.ReplaceAll(fn, ":", "-")
+
+ return strings.ToLower(fn)
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go
new file mode 100644
index 00000000000..3f5a81f1e5e
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/config.go
@@ -0,0 +1,54 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tuf
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "time"
+)
+
+type Config struct {
+ LastTimestamp time.Time `json:"lastTimestamp"`
+}
+
+func LoadConfig(p string) (*Config, error) {
+ var c Config
+
+ b, err := os.ReadFile(p)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read config: %w", err)
+ }
+ err = json.Unmarshal(b, &c)
+ if err != nil {
+ return nil, fmt.Errorf("malformed config file: %w", err)
+ }
+
+ return &c, nil
+}
+
+func (c *Config) Persist(p string) error {
+ b, err := json.Marshal(c)
+ if err != nil {
+ return fmt.Errorf("failed to JSON marshal config: %w", err)
+ }
+ err = os.WriteFile(p, b, 0600)
+ if err != nil {
+ return fmt.Errorf("failed to write config: %w", err)
+ }
+
+ return nil
+}
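+
+// Example round trip (illustrative sketch; the path is a placeholder):
+//
+//	cfg := &tuf.Config{LastTimestamp: time.Now()}
+//	if err := cfg.Persist(path); err != nil {
+//		log.Fatal(err)
+//	}
+//	loaded, err := tuf.LoadConfig(path)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = loaded.LastTimestamp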
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go
new file mode 100644
index 00000000000..e3df77a55d5
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/options.go
@@ -0,0 +1,177 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tuf
+
+import (
+ "embed"
+ "math"
+ "os"
+ "path/filepath"
+
+ "github.com/sigstore/sigstore-go/pkg/util"
+ "github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
+)
+
+//go:embed repository
+var embeddedRepo embed.FS
+
+const (
+ DefaultMirror = "https://tuf-repo-cdn.sigstore.dev"
+ StagingMirror = "https://tuf-repo-cdn.sigstage.dev"
+
+ // The following caching values can be used for the CacheValidity option
+ NoCache = 0
+ MaxCache = math.MaxInt
+)
+
+// Options represent the various options for a Sigstore TUF Client
+type Options struct {
+ // CacheValidity period in days (default 0). The client will persist a
+ // timestamp with the cache after refresh. Note that the client will
+ // always refresh the cache if the metadata is expired or if the client is
+ // unable to find a persisted timestamp, so this is not an optimal control
+ // for air-gapped environments. Use const MaxCache to update the cache when
+ // the metadata is expired, though the first initialization will still
+ // refresh the cache.
+ CacheValidity int
+ // ForceCache controls if the cache should be used without update
+ // as long as the metadata is valid. Use ForceCache over CacheValidity
+ // if you want to always use the cache up until its expiration. Note that
+ // the client will refresh the cache once the metadata has expired, so this
+ // is not an optimal control for air-gapped environments. Clients instead
+ // should provide a trust root file directly to the client to bypass TUF.
+ ForceCache bool
+ // Root is the TUF trust anchor
+ Root []byte
+ // CachePath is the location on disk for TUF cache
+ // (default $HOME/.sigstore/tuf)
+ CachePath string
+ // RepositoryBaseURL is the TUF repository location URL
+ // (default https://tuf-repo-cdn.sigstore.dev)
+ RepositoryBaseURL string
+ // DisableLocalCache allows the client to work on a read-only
+ // file system. If this is set, the cache path is ignored.
+ DisableLocalCache bool
+ // DisableConsistentSnapshot disables consistent-snapshot handling;
+ // when set, target paths are not prefixed with their hash.
+ DisableConsistentSnapshot bool
+ // Fetcher is the metadata fetcher
+ Fetcher fetcher.Fetcher
+}
+
+// WithCacheValidity sets the cache validity period in days
+func (o *Options) WithCacheValidity(days int) *Options {
+ o.CacheValidity = days
+ return o
+}
+
+// WithForceCache forces the client to use the cache without updating
+func (o *Options) WithForceCache() *Options {
+ o.ForceCache = true
+ return o
+}
+
+// WithRoot sets the TUF trust anchor
+func (o *Options) WithRoot(root []byte) *Options {
+ o.Root = root
+ return o
+}
+
+// WithCachePath sets the location on disk for TUF cache
+func (o *Options) WithCachePath(path string) *Options {
+ o.CachePath = path
+ return o
+}
+
+// WithRepositoryBaseURL sets the TUF repository location URL
+func (o *Options) WithRepositoryBaseURL(url string) *Options {
+ o.RepositoryBaseURL = url
+ return o
+}
+
+// WithDisableLocalCache sets the client to work on a read-only file system
+func (o *Options) WithDisableLocalCache() *Options {
+ o.DisableLocalCache = true
+ return o
+}
+
+// WithDisableConsistentSnapshot sets the client to disable consistent snapshot
+func (o *Options) WithDisableConsistentSnapshot() *Options {
+ o.DisableConsistentSnapshot = true
+ return o
+}
+
+// WithFetcher sets the metadata fetcher
+func (o *Options) WithFetcher(f fetcher.Fetcher) *Options {
+ o.Fetcher = f
+ return o
+}
+
+// DefaultOptions returns an options struct for the public good instance
+func DefaultOptions() *Options {
+ var opts Options
+ var err error
+
+ opts.Root = DefaultRoot()
+ home, err := os.UserHomeDir()
+ if err != nil {
+ // Fall back to using a TUF repository in the temp location
+ home = os.TempDir()
+ }
+ opts.CachePath = filepath.Join(home, ".sigstore", "root")
+ opts.RepositoryBaseURL = DefaultMirror
+ fetcher := fetcher.NewDefaultFetcher()
+ fetcher.SetHTTPUserAgent(util.ConstructUserAgent())
+ opts.Fetcher = fetcher
+
+ return &opts
+}
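+
+// Example usage (illustrative sketch): the With* methods chain, so custom
+// options can be built from the defaults. The staging values below are
+// examples of overriding both the mirror and the trust anchor together.
+//
+//	opts := tuf.DefaultOptions().
+//		WithCacheValidity(1).
+//		WithRepositoryBaseURL(tuf.StagingMirror).
+//		WithRoot(tuf.StagingRoot())
+//	client, err := tuf.New(opts)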
+
+// DefaultRoot returns the root.json for the public good instance
+func DefaultRoot() []byte {
+ // The embed file system always uses forward slashes as path separators,
+ // even on Windows
+ p := "repository/root.json"
+
+ b, err := embeddedRepo.ReadFile(p)
+ if err != nil {
+ // This should never happen.
+ // ReadFile from an embedded FS will never fail as long as
+ // the path is correct. If it fails, it would mean
+ // that the binary was not assembled as it should be, and there
+ // is no way to recover from that.
+ panic(err)
+ }
+
+ return b
+}
+
+// StagingRoot returns the root.json for the staging instance
+func StagingRoot() []byte {
+ // The embed file system always uses forward slashes as path separators,
+ // even on Windows
+ p := "repository/staging_root.json"
+
+ b, err := embeddedRepo.ReadFile(p)
+ if err != nil {
+ // This should never happen.
+ // ReadFile from an embedded FS will never fail as long as
+ // the path is correct. If it fails, it would mean
+ // that the binary was not assembled as it should be, and there
+ // is no way to recover from that.
+ panic(err)
+ }
+
+ return b
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json
new file mode 100644
index 00000000000..fe4f3ba2189
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/root.json
@@ -0,0 +1,145 @@
+{
+ "signatures": [
+ {
+ "keyid": "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3",
+ "sig": ""
+ },
+ {
+ "keyid": "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2",
+ "sig": "3045022100bbddd464f8066ceb88ba787375c12cd6330680e08c2910703e6538c71cc79ad202205190b06e4537fe961b3ef81fe68edcd0089c19f919afed423b9aafd700641153"
+ },
+ {
+ "keyid": "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06",
+ "sig": "3044022069306cd5257f732a740c1afe60a8e433c5de58eafeadbe99c336c9c71d198cf802200d773953ae7dbc48d3e5bad9a6f64bafff196b7e2ad4a52a19519367d47dc042"
+ },
+ {
+ "keyid": "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222",
+ "sig": "304402204d21a2ec80df66e61f6fe2912951dc47df836036f8c0ab10816d375e71dbf79e0220547adce1afdf04e6794efa203dd5264c6f7e0ef78e57fe934b0d26cb994eec76"
+ },
+ {
+ "keyid": "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70",
+ "sig": "3045022060826496557144eb1649893ed5f6f4ea54536feb0ca82f8b89ae641be39743e5022100ad7118b5e9d4837326206e412fc6da2999925d110328a7c166b06c624336c93f"
+ },
+ {
+ "keyid": "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632",
+ "sig": "3046022100d8179439c2e73eb0c1733abee7faf832dcaea7263edcb4919891c3a247f05923022100e1a437e0797e803f9b72dc9d2d92155b0a2270c24efdd5f4b3a5d8f0b0f431a7"
+ }
+ ],
+ "signed": {
+ "_type": "root",
+ "consistent_snapshot": true,
+ "expires": "2026-01-22T13:05:59Z",
+ "keys": {
+ "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5": {
+ "keyid_hash_algorithms": [
+ "sha256",
+ "sha512"
+ ],
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-online-uri": "gcpkms:projects/sigstore-root-signing/locations/global/keyRings/root/cryptoKeys/timestamp/cryptoKeyVersions/1"
+ },
+ "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632": {
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMxpPOJCIZ5otG4106fGJseEQi3V9\npkMYQ4uyV9Tj1M7WHXIyLG+jkfvuG0glQ1JZbRZZBV3gAR4sojdGHISeow==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-keyowner": "@lance"
+ },
+ "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06": {
+ "keyid_hash_algorithms": [
+ "sha256",
+ "sha512"
+ ],
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-keyowner": "@santiagotorres"
+ },
+ "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222": {
+ "keyid_hash_algorithms": [
+ "sha256",
+ "sha512"
+ ],
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-keyowner": "@bobcallaway"
+ },
+ "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70": {
+ "keyid_hash_algorithms": [
+ "sha256",
+ "sha512"
+ ],
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-keyowner": "@joshuagl"
+ },
+ "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2": {
+ "keyid_hash_algorithms": [
+ "sha256",
+ "sha512"
+ ],
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-keyowner": "@mnm678"
+ }
+ },
+ "roles": {
+ "root": {
+ "keyids": [
+ "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2",
+ "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06",
+ "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222",
+ "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70",
+ "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632"
+ ],
+ "threshold": 3
+ },
+ "snapshot": {
+ "keyids": [
+ "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5"
+ ],
+ "threshold": 1,
+ "x-tuf-on-ci-expiry-period": 3650,
+ "x-tuf-on-ci-signing-period": 365
+ },
+ "targets": {
+ "keyids": [
+ "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2",
+ "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06",
+ "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222",
+ "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70",
+ "183e64f37670dc13ca0d28995a3053f3740954ddce44321a41e46534cf44e632"
+ ],
+ "threshold": 3
+ },
+ "timestamp": {
+ "keyids": [
+ "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5"
+ ],
+ "threshold": 1,
+ "x-tuf-on-ci-expiry-period": 7,
+ "x-tuf-on-ci-signing-period": 6
+ }
+ },
+ "spec_version": "1.0",
+ "version": 13,
+ "x-tuf-on-ci-expiry-period": 197,
+ "x-tuf-on-ci-signing-period": 46
+ }
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json
new file mode 100644
index 00000000000..2a06edad088
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/tuf/repository/staging_root.json
@@ -0,0 +1,107 @@
+{
+ "signatures": [
+ {
+ "keyid": "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81",
+ "sig": "3046022100fe72afdbab1bef70c6f461f39f5e75cf543e5277648bfab798a108a0f76f0ca002210098e1e1804b7a13bab42c063691864d85fc4bf6f5a875346b388be00f139c6118"
+ },
+ {
+ "keyid": "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc",
+ "sig": "304502210094423ead9a7d546d703f649b408441688eb30f3279fb065b28eea05d2b36843102206f21fa2888836485964c7cb7468a16ddb6297784c50cdba03888578d7b46e0c7"
+ },
+ {
+ "keyid": "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237",
+ "sig": ""
+ },
+ {
+ "keyid": "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5",
+ "sig": ""
+ }
+ ],
+ "signed": {
+ "_type": "root",
+ "consistent_snapshot": true,
+ "expires": "2025-12-26T13:27:03Z",
+ "keys": {
+ "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5": {
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEoxkvDOmtGEknB3M+ZkPts8joDM0X\nIH5JZwPlgC2CXs/eqOuNF8AcEWwGYRiDhV/IMlQw5bg8PLICQcgsbrDiKg==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-keyowner": "@mnm678"
+ },
+ "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc": {
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE++Wv+DcLRk+mfkmlpCwl1GUi9EMh\npBUTz8K0fH7bE4mQuViGSyWA/eyMc0HvzZi6Xr0diHw0/lUPBvok214YQw==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-keyowner": "@kommendorkapten"
+ },
+ "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237": {
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFHDb85JH+JYR1LQmxiz4UMokVMnP\nxKoWpaEnFCKXH8W4Fc/DfIxMnkpjCuvWUBdJXkO0aDIxwsij8TOFh2R7dw==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-keyowner": "@joshuagl"
+ },
+ "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81": {
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEohqIdE+yTl4OxpX8ZxNUPrg3SL9H\nBDnhZuceKkxy2oMhUOxhWweZeG3bfM1T4ZLnJimC6CAYVU5+F5jZCoftRw==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-keyowner": "@jku"
+ },
+ "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4": {
+ "keytype": "ecdsa",
+ "keyval": {
+ "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExxmEtmhF5U+i+v/6he4BcSLzCgMx\n/0qSrvDg6bUWwUrkSKS2vDpcJrhGy5fmmhRrGawjPp1ALpC3y1kqFTpXDg==\n-----END PUBLIC KEY-----\n"
+ },
+ "scheme": "ecdsa-sha2-nistp256",
+ "x-tuf-on-ci-online-uri": "gcpkms:projects/projectsigstore-staging/locations/global/keyRings/tuf-keyring/cryptoKeys/tuf-key/cryptoKeyVersions/2"
+ }
+ },
+ "roles": {
+ "root": {
+ "keyids": [
+ "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81",
+ "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc",
+ "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237",
+ "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5"
+ ],
+ "threshold": 2
+ },
+ "snapshot": {
+ "keyids": [
+ "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4"
+ ],
+ "threshold": 1,
+ "x-tuf-on-ci-expiry-period": 3650,
+ "x-tuf-on-ci-signing-period": 365
+ },
+ "targets": {
+ "keyids": [
+ "aa61e09f6af7662ac686cf0c6364079f63d3e7a86836684eeced93eace3acd81",
+ "61f9609d2655b346fcebccd66b509d5828168d5e447110e261f0bcc8553624bc",
+ "9471fbda95411d10109e467ad526082d15f14a38de54ea2ada9687ab39d8e237",
+ "0374a9e18a20a2103736cb4277e2fdd7f8453642c7d9eaf4ad8aee9cf2d47bb5"
+ ],
+ "threshold": 1
+ },
+ "timestamp": {
+ "keyids": [
+ "c3479007e861445ce5dc109d9661ed77b35bbc0e3f161852c46114266fc2daa4"
+ ],
+ "threshold": 1,
+ "x-tuf-on-ci-expiry-period": 7,
+ "x-tuf-on-ci-signing-period": 6
+ }
+ },
+ "spec_version": "1.0",
+ "version": 12,
+ "x-tuf-on-ci-expiry-period": 182,
+ "x-tuf-on-ci-signing-period": 35
+ }
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go
new file mode 100644
index 00000000000..e5a1b088c18
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/util/util.go
@@ -0,0 +1,37 @@
+// Copyright 2024 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util //nolint:revive
+
+import (
+ "runtime/debug"
+)
+
+func ConstructUserAgent() string {
+ userAgent := "sigstore-go"
+
+ buildInfo, ok := debug.ReadBuildInfo()
+ if !ok {
+ return userAgent
+ }
+
+ for _, eachDep := range buildInfo.Deps {
+ if eachDep.Path == "github.com/sigstore/sigstore-go" {
+ userAgent += "/"
+ userAgent += eachDep.Version
+ }
+ }
+
+ return userAgent
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go
new file mode 100644
index 00000000000..e33d915a8d8
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate.go
@@ -0,0 +1,34 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "crypto/x509"
+ "errors"
+ "time"
+
+ "github.com/sigstore/sigstore-go/pkg/root"
+)
+
+func VerifyLeafCertificate(observerTimestamp time.Time, leafCert *x509.Certificate, trustedMaterial root.TrustedMaterial) ([][]*x509.Certificate, error) { // nolint: revive
+ for _, ca := range trustedMaterial.FulcioCertificateAuthorities() {
+ chains, err := ca.Verify(leafCert, observerTimestamp)
+ if err == nil {
+ return chains, nil
+ }
+ }
+
+ return nil, errors.New("leaf certificate verification failed")
+}
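+
+// A minimal usage sketch, assuming leafCert and trustedMaterial are supplied
+// by the caller and ts is a previously verified observer timestamp; the
+// returned chains can then be handed to SCT verification:
+//
+//	chains, err := VerifyLeafCertificate(ts, leafCert, trustedMaterial)
+//	if err != nil {
+//		return err
+//	}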
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go
new file mode 100644
index 00000000000..1d3bad1a714
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/certificate_identity.go
@@ -0,0 +1,217 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+
+ "github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
+)
+
+type SubjectAlternativeNameMatcher struct {
+ SubjectAlternativeName string `json:"subjectAlternativeName"`
+ Regexp regexp.Regexp `json:"regexp,omitempty"`
+}
+
+type IssuerMatcher struct {
+ Issuer string `json:"issuer"`
+ Regexp regexp.Regexp `json:"regexp,omitempty"`
+}
+
+type CertificateIdentity struct {
+ SubjectAlternativeName SubjectAlternativeNameMatcher `json:"subjectAlternativeName"`
+ Issuer IssuerMatcher `json:"issuer"`
+ certificate.Extensions
+}
+
+type CertificateIdentities []CertificateIdentity
+
+type ErrValueMismatch struct {
+ object string
+ expected string
+ actual string
+}
+
+func (e *ErrValueMismatch) Error() string {
+ return fmt.Sprintf("expected %s value \"%s\", got \"%s\"", e.object, e.expected, e.actual)
+}
+
+type ErrValueRegexMismatch struct {
+ object string
+ regex string
+ value string
+}
+
+func (e *ErrValueRegexMismatch) Error() string {
+ return fmt.Sprintf("expected %s value to match regex \"%s\", got \"%s\"", e.object, e.regex, e.value)
+}
+
+type ErrNoMatchingCertificateIdentity struct {
+ errors []error
+}
+
+func (e *ErrNoMatchingCertificateIdentity) Error() string {
+ if len(e.errors) > 0 {
+ return fmt.Sprintf("no matching CertificateIdentity found, last error: %v", e.errors[len(e.errors)-1])
+ }
+ return "no matching CertificateIdentity found"
+}
+
+func (e *ErrNoMatchingCertificateIdentity) Unwrap() []error {
+ return e.errors
+}
+
+// NewSANMatcher provides an easier way to create a SubjectAlternativeNameMatcher.
+// If the regexpStr fails to compile into a Regexp, an error is returned.
+func NewSANMatcher(sanValue string, regexpStr string) (SubjectAlternativeNameMatcher, error) {
+ r, err := regexp.Compile(regexpStr)
+ if err != nil {
+ return SubjectAlternativeNameMatcher{}, err
+ }
+
+ return SubjectAlternativeNameMatcher{
+ SubjectAlternativeName: sanValue,
+ Regexp: *r}, nil
+}
+
+// MarshalJSON overrides the default Regexp JSON marshalling, which is quite ugly.
+func (s *SubjectAlternativeNameMatcher) MarshalJSON() ([]byte, error) {
+ return json.Marshal(&struct {
+ SubjectAlternativeName string `json:"subjectAlternativeName"`
+ Regexp string `json:"regexp,omitempty"`
+ }{
+ SubjectAlternativeName: s.SubjectAlternativeName,
+ Regexp: s.Regexp.String(),
+ })
+}
+
+// Verify checks if the actualCert matches the SANMatcher's Value and
+// Regexp – if those values have been provided.
+func (s SubjectAlternativeNameMatcher) Verify(actualCert certificate.Summary) error {
+ if s.SubjectAlternativeName != "" &&
+ actualCert.SubjectAlternativeName != s.SubjectAlternativeName {
+		return &ErrValueMismatch{"SAN", s.SubjectAlternativeName, actualCert.SubjectAlternativeName}
+ }
+
+ if s.Regexp.String() != "" &&
+ !s.Regexp.MatchString(actualCert.SubjectAlternativeName) {
+		return &ErrValueRegexMismatch{"SAN", s.Regexp.String(), actualCert.SubjectAlternativeName}
+ }
+ return nil
+}
+
+func NewIssuerMatcher(issuerValue, regexpStr string) (IssuerMatcher, error) {
+ r, err := regexp.Compile(regexpStr)
+ if err != nil {
+ return IssuerMatcher{}, err
+ }
+
+ return IssuerMatcher{Issuer: issuerValue, Regexp: *r}, nil
+}
+
+func (i *IssuerMatcher) MarshalJSON() ([]byte, error) {
+ return json.Marshal(&struct {
+ Issuer string `json:"issuer"`
+ Regexp string `json:"regexp,omitempty"`
+ }{
+ Issuer: i.Issuer,
+ Regexp: i.Regexp.String(),
+ })
+}
+
+func (i IssuerMatcher) Verify(actualCert certificate.Summary) error {
+ if i.Issuer != "" &&
+ actualCert.Issuer != i.Issuer {
+		return &ErrValueMismatch{"issuer", i.Issuer, actualCert.Issuer}
+ }
+
+ if i.Regexp.String() != "" &&
+ !i.Regexp.MatchString(actualCert.Issuer) {
+		return &ErrValueRegexMismatch{"issuer", i.Regexp.String(), actualCert.Issuer}
+ }
+ return nil
+}
+
+func NewCertificateIdentity(sanMatcher SubjectAlternativeNameMatcher, issuerMatcher IssuerMatcher, extensions certificate.Extensions) (CertificateIdentity, error) {
+ if sanMatcher.SubjectAlternativeName == "" && sanMatcher.Regexp.String() == "" {
+ return CertificateIdentity{}, errors.New("when verifying a certificate identity, there must be subject alternative name criteria")
+ }
+
+ if issuerMatcher.Issuer == "" && issuerMatcher.Regexp.String() == "" {
+		return CertificateIdentity{}, errors.New("when verifying a certificate identity, there must be issuer criteria")
+ }
+
+ if extensions.Issuer != "" {
+ return CertificateIdentity{}, errors.New("please specify issuer in IssuerMatcher, not Extensions")
+ }
+
+ certID := CertificateIdentity{
+ SubjectAlternativeName: sanMatcher,
+ Issuer: issuerMatcher,
+ Extensions: extensions,
+ }
+
+ return certID, nil
+}
+
+// NewShortCertificateIdentity provides a more convenient way of initializing
+// a CertificateIdentity with a SAN and the Issuer OID extension. If you need
+// to check more OID extensions, use NewCertificateIdentity instead.
+func NewShortCertificateIdentity(issuer, issuerRegex, sanValue, sanRegex string) (CertificateIdentity, error) {
+ sanMatcher, err := NewSANMatcher(sanValue, sanRegex)
+ if err != nil {
+ return CertificateIdentity{}, err
+ }
+
+ issuerMatcher, err := NewIssuerMatcher(issuer, issuerRegex)
+ if err != nil {
+ return CertificateIdentity{}, err
+ }
+
+ return NewCertificateIdentity(sanMatcher, issuerMatcher, certificate.Extensions{})
+}
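+
+// A minimal sketch of building an identity matcher with the helper above; the
+// issuer URL and SAN regex here are hypothetical values, not defaults:
+//
+//	certID, err := NewShortCertificateIdentity(
+//		"https://token.actions.githubusercontent.com", // exact issuer
+//		"",                             // no issuer regex
+//		"",                             // no exact SAN
+//		"^https://github.com/my-org/",  // SAN regex
+//	)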
+
+// Verify verifies the CertificateIdentities, and if ANY of them match the cert,
+// it returns the CertificateIdentity that matched. If none match, it returns an
+// error.
+func (i CertificateIdentities) Verify(cert certificate.Summary) (*CertificateIdentity, error) {
+ multierr := &ErrNoMatchingCertificateIdentity{}
+ var err error
+ for _, ci := range i {
+ if err = ci.Verify(cert); err == nil {
+ return &ci, nil
+ }
+ multierr.errors = append(multierr.errors, err)
+ }
+ return nil, multierr
+}
+
+// Verify checks if the actualCert matches the CertificateIdentity's SAN and
+// any of the provided OID extension values. Any empty values are ignored.
+func (c CertificateIdentity) Verify(actualCert certificate.Summary) error {
+ var err error
+ if err = c.SubjectAlternativeName.Verify(actualCert); err != nil {
+ return err
+ }
+
+ if err = c.Issuer.Verify(actualCert); err != nil {
+ return err
+ }
+
+ return certificate.CompareExtensions(c.Extensions, actualCert.Extensions)
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go
new file mode 100644
index 00000000000..18263c09833
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/errors.go
@@ -0,0 +1,39 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "fmt"
+)
+
+type ErrVerification struct {
+ err error
+}
+
+func NewVerificationError(e error) ErrVerification {
+ return ErrVerification{e}
+}
+
+func (e ErrVerification) Unwrap() error {
+ return e.err
+}
+
+func (e ErrVerification) String() string {
+ return fmt.Sprintf("verification error: %s", e.err.Error())
+}
+
+func (e ErrVerification) Error() string {
+ return e.String()
+}
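+
+// Because ErrVerification implements Unwrap, callers can inspect the
+// underlying cause with the standard errors package; a minimal sketch,
+// assuming err is some sentinel the caller cares about:
+//
+//	verr := NewVerificationError(err)
+//	if errors.Is(verr, err) {
+//		// the original cause is still reachable through the wrapper
+//	}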
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go
new file mode 100644
index 00000000000..85bcb50284d
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/interface.go
@@ -0,0 +1,130 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "crypto/x509"
+ "errors"
+ "time"
+
+ in_toto "github.com/in-toto/attestation/go/v1"
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+ "github.com/sigstore/sigstore-go/pkg/root"
+ "github.com/sigstore/sigstore-go/pkg/tlog"
+)
+
+var errNotImplemented = errors.New("not implemented")
+
+type HasInclusionPromise interface {
+ HasInclusionPromise() bool
+}
+
+type HasInclusionProof interface {
+ HasInclusionProof() bool
+}
+
+type SignatureProvider interface {
+ SignatureContent() (SignatureContent, error)
+}
+
+type SignedTimestampProvider interface {
+ Timestamps() ([][]byte, error)
+}
+
+type TlogEntryProvider interface {
+ TlogEntries() ([]*tlog.Entry, error)
+}
+
+type VerificationProvider interface {
+ VerificationContent() (VerificationContent, error)
+}
+
+type VersionProvider interface {
+ Version() (string, error)
+}
+
+type SignedEntity interface {
+ HasInclusionPromise
+ HasInclusionProof
+ SignatureProvider
+ SignedTimestampProvider
+ TlogEntryProvider
+ VerificationProvider
+ VersionProvider
+}
+
+type VerificationContent interface {
+ CompareKey(any, root.TrustedMaterial) bool
+ ValidAtTime(time.Time, root.TrustedMaterial) bool
+ Certificate() *x509.Certificate
+ PublicKey() PublicKeyProvider
+}
+
+type SignatureContent interface {
+ Signature() []byte
+ EnvelopeContent() EnvelopeContent
+ MessageSignatureContent() MessageSignatureContent
+}
+
+type PublicKeyProvider interface {
+ Hint() string
+}
+
+type MessageSignatureContent interface {
+ Digest() []byte
+ DigestAlgorithm() string
+ Signature() []byte
+}
+
+type EnvelopeContent interface {
+ RawEnvelope() *dsse.Envelope
+ Statement() (*in_toto.Statement, error)
+}
+
+// BaseSignedEntity is a helper struct that implements all the interfaces
+// of SignedEntity. It can be embedded in a struct to implement the SignedEntity
+// interface. This may be useful for testing, or for implementing a SignedEntity
+// that only implements a subset of the interfaces.
+type BaseSignedEntity struct{}
+
+var _ SignedEntity = &BaseSignedEntity{}
+
+func (b *BaseSignedEntity) HasInclusionPromise() bool {
+ return false
+}
+
+func (b *BaseSignedEntity) HasInclusionProof() bool {
+ return false
+}
+
+func (b *BaseSignedEntity) VerificationContent() (VerificationContent, error) {
+ return nil, errNotImplemented
+}
+
+func (b *BaseSignedEntity) SignatureContent() (SignatureContent, error) {
+ return nil, errNotImplemented
+}
+
+func (b *BaseSignedEntity) Timestamps() ([][]byte, error) {
+ return nil, errNotImplemented
+}
+
+func (b *BaseSignedEntity) TlogEntries() ([]*tlog.Entry, error) {
+ return nil, errNotImplemented
+}
+
+func (b *BaseSignedEntity) Version() (string, error) {
+ return "", errNotImplemented
+}
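+
+// A minimal sketch of the embedding pattern described above: a hypothetical
+// test double overrides only Version, while every other SignedEntity method
+// falls through to the errNotImplemented defaults.
+//
+//	type versionOnlyEntity struct {
+//		BaseSignedEntity
+//	}
+//
+//	func (versionOnlyEntity) Version() (string, error) { return "0.3", nil }
+//
+//	var _ SignedEntity = &versionOnlyEntity{}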
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go
new file mode 100644
index 00000000000..bf447c28c39
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/sct.go
@@ -0,0 +1,100 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "crypto/x509"
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ ct "github.com/google/certificate-transparency-go"
+ "github.com/google/certificate-transparency-go/ctutil"
+ ctx509 "github.com/google/certificate-transparency-go/x509"
+ "github.com/google/certificate-transparency-go/x509util"
+ "github.com/sigstore/sigstore-go/pkg/root"
+)
+
+// VerifySignedCertificateTimestamp, given a threshold, TrustedMaterial, and
+// candidate certificate chains, extracts SCTs from the leaf certificate (the
+// first certificate of the first chain) and verifies the timestamps using the
+// TrustedMaterial's CTLogs().
+func VerifySignedCertificateTimestamp(chains [][]*x509.Certificate, threshold int, trustedMaterial root.TrustedMaterial) error { // nolint: revive
+ if len(chains) == 0 || len(chains[0]) == 0 || chains[0][0] == nil {
+ return errors.New("no chains provided")
+ }
+ // The first certificate in the chain is always the leaf certificate
+ leaf := chains[0][0]
+
+ ctlogs := trustedMaterial.CTLogs()
+
+ scts, err := x509util.ParseSCTsFromCertificate(leaf.Raw)
+ if err != nil {
+ return err
+ }
+
+ leafCTCert, err := ctx509.ParseCertificates(leaf.Raw)
+ if err != nil {
+ return err
+ }
+
+ verified := 0
+ for _, sct := range scts {
+ encodedKeyID := hex.EncodeToString(sct.LogID.KeyID[:])
+ key, ok := ctlogs[encodedKeyID]
+ if !ok {
+ // skip entries the trust root cannot verify
+ continue
+ }
+
+ // Ensure sct is within ctlog validity window
+ sctTime := ct.TimestampToTime(sct.Timestamp)
+ if !key.ValidityPeriodStart.IsZero() && sctTime.Before(key.ValidityPeriodStart) {
+ // skip entries that were before ctlog key start time
+ continue
+ }
+ if !key.ValidityPeriodEnd.IsZero() && sctTime.After(key.ValidityPeriodEnd) {
+ // skip entries that were after ctlog key end time
+ continue
+ }
+
+ for _, chain := range chains {
+ fulcioChain := make([]*ctx509.Certificate, len(leafCTCert))
+ copy(fulcioChain, leafCTCert)
+
+ if len(chain) < 2 {
+ continue
+ }
+ parentCert := chain[1].Raw
+
+ fulcioIssuer, err := ctx509.ParseCertificates(parentCert)
+ if err != nil {
+ continue
+ }
+ fulcioChain = append(fulcioChain, fulcioIssuer...)
+
+ err = ctutil.VerifySCT(key.PublicKey, fulcioChain, sct, true)
+ if err == nil {
+ verified++
+ }
+ }
+ }
+
+ if verified < threshold {
+ return fmt.Errorf("only able to verify %d SCT entries; unable to meet threshold of %d", verified, threshold)
+ }
+
+ return nil
+}
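+
+// A minimal usage sketch, assuming chains were produced by
+// VerifyLeafCertificate and that a threshold of one verified SCT is
+// acceptable for the deployment:
+//
+//	if err := VerifySignedCertificateTimestamp(chains, 1, trustedMaterial); err != nil {
+//		return err
+//	}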
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go
new file mode 100644
index 00000000000..0386a98e18c
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signature.go
@@ -0,0 +1,519 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/x509"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "os"
+ "slices"
+
+ in_toto "github.com/in-toto/attestation/go/v1"
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+ v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
+ "github.com/sigstore/sigstore-go/pkg/root"
+ "github.com/sigstore/sigstore/pkg/signature"
+ sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse"
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+const maxAllowedSubjects = 1024
+const maxAllowedSubjectDigests = 32
+
+var ErrDSSEInvalidSignatureCount = errors.New("exactly one signature is required")
+
+func VerifySignature(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive
+ verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false)
+ if err != nil {
+ return fmt.Errorf("could not load signature verifier: %w", err)
+ }
+
+ return verifySignatureWithVerifier(verifier, sigContent, verificationContent, trustedMaterial)
+}
+
+func verifySignatureWithVerifier(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive
+ if envelope := sigContent.EnvelopeContent(); envelope != nil {
+ return verifyEnvelope(verifier, envelope)
+ } else if msg := sigContent.MessageSignatureContent(); msg != nil {
+ return errors.New("artifact must be provided to verify message signature")
+ }
+
+ // handle an invalid signature content message
+	return errors.New("signature content has neither an envelope nor a message")
+}
+
+func VerifySignatureWithArtifacts(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifacts []io.Reader) error { // nolint: revive
+ verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false)
+ if err != nil {
+ return fmt.Errorf("could not load signature verifier: %w", err)
+ }
+ return verifySignatureWithVerifierAndArtifacts(verifier, sigContent, verificationContent, trustedMaterial, artifacts)
+}
+
+func verifySignatureWithVerifierAndArtifacts(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifacts []io.Reader) error { // nolint: revive
+ envelope := sigContent.EnvelopeContent()
+ msg := sigContent.MessageSignatureContent()
+ if envelope == nil && msg == nil {
+		return errors.New("signature content has neither an envelope nor a message")
+ }
+ // If there is only one artifact and no envelope,
+ // attempt to verify the message signature with the artifact.
+ if envelope == nil {
+ if len(artifacts) != 1 {
+ return fmt.Errorf("only one artifact can be verified with a message signature")
+ }
+ return verifyMessageSignature(verifier, msg, artifacts[0])
+ }
+
+ // Otherwise, verify the envelope with the provided artifacts
+ return verifyEnvelopeWithArtifacts(verifier, envelope, artifacts)
+}
+
+func VerifySignatureWithArtifactDigests(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, digests []ArtifactDigest) error { // nolint: revive
+ verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false)
+ if err != nil {
+ return fmt.Errorf("could not load signature verifier: %w", err)
+ }
+ return verifySignatureWithVerifierAndArtifactDigests(verifier, sigContent, verificationContent, trustedMaterial, digests)
+}
+
+func verifySignatureWithVerifierAndArtifactDigests(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, digests []ArtifactDigest) error { // nolint: revive
+ envelope := sigContent.EnvelopeContent()
+ msg := sigContent.MessageSignatureContent()
+ if envelope == nil && msg == nil {
+		return errors.New("signature content has neither an envelope nor a message")
+ }
+ // If there is only one artifact and no envelope,
+ // attempt to verify the message signature with the artifact.
+ if envelope == nil {
+ if len(digests) != 1 {
+ return fmt.Errorf("only one artifact can be verified with a message signature")
+ }
+ return verifyMessageSignatureWithArtifactDigest(verifier, msg, digests[0].Digest)
+ }
+
+ return verifyEnvelopeWithArtifactDigests(verifier, envelope, digests)
+}
+
+// compatVerifier is a signature.Verifier that tries multiple verifiers
+// and returns nil if any of them verify the signature. This is used to
+// verify signatures that were generated with old clients that used SHA256
+// for ECDSA P384/P521 keys.
+type compatVerifier struct {
+ verifiers []signature.Verifier
+}
+
+func (v *compatVerifier) VerifySignature(signature, message io.Reader, opts ...signature.VerifyOption) error {
+ // Create a buffer to store the signature bytes
+ sigBuf := &bytes.Buffer{}
+ sigTee := io.TeeReader(signature, sigBuf)
+ sigBytes, err := io.ReadAll(sigTee)
+ if err != nil {
+ return fmt.Errorf("failed to read signature: %w", err)
+ }
+
+ // Create a buffer to store the message bytes
+ msgBuf := &bytes.Buffer{}
+ msgTee := io.TeeReader(message, msgBuf)
+ msgBytes, err := io.ReadAll(msgTee)
+ if err != nil {
+ return fmt.Errorf("failed to read message: %w", err)
+ }
+
+ for idx, verifier := range v.verifiers {
+ if idx != 0 {
+ fmt.Fprint(os.Stderr, "Failed to verify signature with default verifier, trying compatibility verifier\n")
+ }
+ err := verifier.VerifySignature(bytes.NewReader(sigBytes), bytes.NewReader(msgBytes), opts...)
+ if err == nil {
+ return nil
+ }
+ }
+ return fmt.Errorf("no compatible verifier found")
+}
+
+func (v *compatVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+ return v.verifiers[0].PublicKey(opts...)
+}
+
+func compatSignatureVerifier(leafCert *x509.Certificate, enableCompat bool, isDSSE bool) (signature.Verifier, error) {
+ // LoadDefaultSigner/Verifier functions accept a few options to select
+ // the default signer/verifier when there are ambiguities, like for
+ // ED25519 keys, which could be used with PureEd25519 or Ed25519ph.
+ //
+	// When dealing with DSSE, use PureEd25519; when working with
+	// hashedrekord entries, default to Ed25519ph, because verifying
+	// against a precomputed digest is the only option there.
+ var defaultOpts []signature.LoadOption
+ if !isDSSE {
+ defaultOpts = []signature.LoadOption{options.WithED25519ph()}
+ }
+
+ verifiers := make([]signature.Verifier, 0)
+ verifier, err := signature.LoadDefaultVerifier(leafCert.PublicKey, defaultOpts...)
+ if err != nil {
+ return nil, err
+ }
+ // If compatibility is not enabled, return only the default verifier
+ if !enableCompat {
+ return verifier, nil
+ }
+ verifiers = append(verifiers, verifier)
+
+ // Add a compatibility verifier for ECDSA P384/P521, because we still want
+ // to verify signatures generated with old clients that used SHA256
+ var algorithmDetails signature.AlgorithmDetails
+ if pubKey, ok := leafCert.PublicKey.(*ecdsa.PublicKey); ok {
+ switch pubKey.Curve {
+ case elliptic.P384():
+ //nolint:staticcheck // Need to use deprecated field for backwards compatibility
+ algorithmDetails, err = signature.GetAlgorithmDetails(v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_256)
+ case elliptic.P521():
+ //nolint:staticcheck // Need to use deprecated field for backwards compatibility
+ algorithmDetails, err = signature.GetAlgorithmDetails(v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_256)
+ default:
+ return verifier, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ verifier, err = signature.LoadVerifierFromAlgorithmDetails(leafCert.PublicKey, algorithmDetails, defaultOpts...)
+ }
+ if err != nil {
+ return nil, err
+ }
+ verifiers = append(verifiers, verifier)
+ return &compatVerifier{verifiers: verifiers}, nil
+}
+
+func getSignatureVerifier(sigContent SignatureContent, verificationContent VerificationContent, tm root.TrustedMaterial, enableCompat bool) (signature.Verifier, error) {
+ if leafCert := verificationContent.Certificate(); leafCert != nil {
+ isDSSE := sigContent.EnvelopeContent() != nil
+ return compatSignatureVerifier(leafCert, enableCompat, isDSSE)
+ } else if pk := verificationContent.PublicKey(); pk != nil {
+ return tm.PublicKeyVerifier(pk.Hint())
+ }
+
+ return nil, fmt.Errorf("no public key or certificate found")
+}
+
+func verifyEnvelope(verifier signature.Verifier, envelope EnvelopeContent) error {
+ dsseEnv := envelope.RawEnvelope()
+
+ // A DSSE envelope in a Sigstore bundle MUST only contain one
+ // signature, even though DSSE is more permissive.
+ if len(dsseEnv.Signatures) != 1 {
+ return ErrDSSEInvalidSignatureCount
+ }
+ pub, err := verifier.PublicKey()
+ if err != nil {
+ return fmt.Errorf("could not fetch verifier public key: %w", err)
+ }
+ envVerifier, err := dsse.NewEnvelopeVerifier(&sigdsse.VerifierAdapter{
+ SignatureVerifier: verifier,
+ Pub: pub,
+ })
+
+ if err != nil {
+ return fmt.Errorf("could not load envelope verifier: %w", err)
+ }
+
+ _, err = envVerifier.Verify(context.Background(), dsseEnv)
+ if err != nil {
+ return fmt.Errorf("could not verify envelope: %w", err)
+ }
+
+ return nil
+}
+
+func verifyEnvelopeWithArtifacts(verifier signature.Verifier, envelope EnvelopeContent, artifacts []io.Reader) error {
+ if err := verifyEnvelope(verifier, envelope); err != nil {
+ return err
+ }
+ statement, err := envelope.Statement()
+ if err != nil {
+ return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err)
+ }
+ if err = limitSubjects(statement); err != nil {
+ return err
+ }
+ // Sanity check (no subjects)
+ if len(statement.Subject) == 0 {
+ return errors.New("no subjects found in statement")
+ }
+
+ // determine which hash functions to use
+ hashFuncs, err := getHashFunctions(statement)
+ if err != nil {
+ return fmt.Errorf("unable to determine hash functions: %w", err)
+ }
+
+ hashedArtifacts := make([]map[crypto.Hash][]byte, len(artifacts))
+ for i, artifact := range artifacts {
+ // Compute digest of the artifact.
+ hasher, err := newMultihasher(hashFuncs)
+ if err != nil {
+ return fmt.Errorf("could not verify artifact: unable to create hasher: %w", err)
+ }
+ if _, err = io.Copy(hasher, artifact); err != nil {
+ return fmt.Errorf("could not verify artifact: unable to calculate digest: %w", err)
+ }
+ hashedArtifacts[i] = hasher.Sum(nil)
+ }
+
+ // create a map based on the digests present in the statement
+	// the map key is the hash algorithm and the value is a slice of digests
+ // created using that hash algorithm
+ subjectDigests := make(map[crypto.Hash][][]byte)
+ for _, subject := range statement.Subject {
+ for alg, hexdigest := range subject.Digest {
+ hf, err := algStringToHashFunc(alg)
+ if err != nil {
+ continue
+ }
+ if _, ok := subjectDigests[hf]; !ok {
+ subjectDigests[hf] = make([][]byte, 0)
+ }
+ digest, err := hex.DecodeString(hexdigest)
+ if err != nil {
+ continue
+ }
+ subjectDigests[hf] = append(subjectDigests[hf], digest)
+ }
+ }
+
+ // now loop over the provided artifact digests and try to compare them
+ // to the mapped subject digests
+ // if we cannot find a match, exit with an error
+ for _, ha := range hashedArtifacts {
+ matchFound := false
+ for key, value := range ha {
+ statementDigests, ok := subjectDigests[key]
+ if !ok {
+ return fmt.Errorf("no matching artifact hash algorithm found in subject digests")
+ }
+ if ok := isDigestInSlice(value, statementDigests); ok {
+ matchFound = true
+ break
+ }
+ }
+ if !matchFound {
+ return fmt.Errorf("provided artifact digests do not match digests in statement")
+ }
+ }
+
+ return nil
+}
+
+func verifyEnvelopeWithArtifactDigests(verifier signature.Verifier, envelope EnvelopeContent, digests []ArtifactDigest) error {
+ if err := verifyEnvelope(verifier, envelope); err != nil {
+ return err
+ }
+ statement, err := envelope.Statement()
+ if err != nil {
+ return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err)
+ }
+ if err = limitSubjects(statement); err != nil {
+ return err
+ }
+
+ // create a map based on the digests present in the statement
+	// the map key is the hash algorithm and the value is a slice of digests
+ // created using that hash algorithm
+ subjectDigests := make(map[string][][]byte)
+ for _, subject := range statement.Subject {
+ for alg, digest := range subject.Digest {
+ if _, ok := subjectDigests[alg]; !ok {
+ subjectDigests[alg] = make([][]byte, 0)
+ }
+ hexdigest, err := hex.DecodeString(digest)
+ if err != nil {
+ return fmt.Errorf("could not verify artifact: unable to decode subject digest: %w", err)
+ }
+ subjectDigests[alg] = append(subjectDigests[alg], hexdigest)
+ }
+ }
+
+ // now loop over the provided artifact digests and compare them to the mapped subject digests
+ // if we cannot find a match, exit with an error
+ for _, artifactDigest := range digests {
+ statementDigests, ok := subjectDigests[artifactDigest.Algorithm]
+ if !ok {
+			return fmt.Errorf("statement contains no digests using the provided algorithm %q", artifactDigest.Algorithm)
+ }
+ if ok := isDigestInSlice(artifactDigest.Digest, statementDigests); !ok {
+ return fmt.Errorf("provided artifact digest does not match any digest in statement")
+ }
+ }
+
+ return nil
+}
+
+func isDigestInSlice(digest []byte, digestSlice [][]byte) bool {
+ for _, el := range digestSlice {
+ if bytes.Equal(digest, el) {
+ return true
+ }
+ }
+ return false
+}
+
+func verifyMessageSignature(verifier signature.Verifier, msg MessageSignatureContent, artifact io.Reader) error {
+ err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), artifact)
+ if err != nil {
+ return fmt.Errorf("could not verify message: %w", err)
+ }
+
+ return nil
+}
+
+func verifyMessageSignatureWithArtifactDigest(verifier signature.Verifier, msg MessageSignatureContent, artifactDigest []byte) error {
+ if !bytes.Equal(artifactDigest, msg.Digest()) {
+ return errors.New("artifact does not match digest")
+ }
+ if _, ok := verifier.(*signature.ED25519Verifier); ok {
+		return errors.New("ed25519 message signatures can only be verified against the full artifact, not just its digest")
+ }
+ err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), bytes.NewReader([]byte{}), options.WithDigest(artifactDigest))
+
+ if err != nil {
+ return fmt.Errorf("could not verify message: %w", err)
+ }
+
+ return nil
+}
+
+// limitSubjects limits the number of subjects and digests in a statement to prevent DoS.
+func limitSubjects(statement *in_toto.Statement) error {
+ if len(statement.Subject) > maxAllowedSubjects {
+ return fmt.Errorf("too many subjects: %d > %d", len(statement.Subject), maxAllowedSubjects)
+ }
+ for _, subject := range statement.Subject {
+ // limit the number of digests too
+ if len(subject.Digest) > maxAllowedSubjectDigests {
+ return fmt.Errorf("too many digests: %d > %d", len(subject.Digest), maxAllowedSubjectDigests)
+ }
+ }
+ return nil
+}
+
+type multihasher struct {
+ io.Writer
+ hashfuncs []crypto.Hash
+ hashes []io.Writer
+}
+
+func newMultihasher(hashfuncs []crypto.Hash) (*multihasher, error) {
+ if len(hashfuncs) == 0 {
+ return nil, errors.New("no hash functions specified")
+ }
+ hashes := make([]io.Writer, len(hashfuncs))
+ for i := range hashfuncs {
+ hashes[i] = hashfuncs[i].New()
+ }
+ return &multihasher{
+ Writer: io.MultiWriter(hashes...),
+ hashfuncs: hashfuncs,
+ hashes: hashes,
+ }, nil
+}
+
+func (m *multihasher) Sum(b []byte) map[crypto.Hash][]byte {
+ sums := make(map[crypto.Hash][]byte, len(m.hashes))
+ for i := range m.hashes {
+ sums[m.hashfuncs[i]] = m.hashes[i].(hash.Hash).Sum(b)
+ }
+ return sums
+}
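+
+// A minimal sketch of the multihasher: one pass over the input produces a
+// digest for every requested algorithm (data is assumed to be a []byte):
+//
+//	mh, err := newMultihasher([]crypto.Hash{crypto.SHA256, crypto.SHA512})
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(mh, bytes.NewReader(data)); err != nil {
+//		return err
+//	}
+//	sums := mh.Sum(nil) // sums[crypto.SHA256], sums[crypto.SHA512]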
+
+func algStringToHashFunc(alg string) (crypto.Hash, error) {
+ switch alg {
+ case "sha256":
+ return crypto.SHA256, nil
+ case "sha384":
+ return crypto.SHA384, nil
+ case "sha512":
+ return crypto.SHA512, nil
+ default:
+ return 0, errors.New("unsupported digest algorithm")
+ }
+}
+
+// getHashFunctions returns the smallest subset of supported hash functions
+// that are needed to verify all subjects in a statement.
+func getHashFunctions(statement *in_toto.Statement) ([]crypto.Hash, error) {
+ if len(statement.Subject) == 0 {
+ return nil, errors.New("no subjects found in statement")
+ }
+
+ supportedHashFuncs := []crypto.Hash{crypto.SHA512, crypto.SHA384, crypto.SHA256}
+ chosenHashFuncs := make([]crypto.Hash, 0, len(supportedHashFuncs))
+ subjectHashFuncs := make([][]crypto.Hash, len(statement.Subject))
+
+ // go through the statement and make a simple data structure to hold the
+ // list of hash funcs for each subject (subjectHashFuncs)
+ for i, subject := range statement.Subject {
+ for alg := range subject.Digest {
+ hf, err := algStringToHashFunc(alg)
+ if err != nil {
+ continue
+ }
+ subjectHashFuncs[i] = append(subjectHashFuncs[i], hf)
+ }
+ }
+
+ // for each subject, see if we have chosen a compatible hash func, and if
+ // not, add the first one that is supported
+ for _, hfs := range subjectHashFuncs {
+ // if any of the hash funcs are already in chosenHashFuncs, skip
+ if len(intersection(hfs, chosenHashFuncs)) > 0 {
+ continue
+ }
+
+ // check each supported hash func and add it if the subject
+ // has a digest for it
+ for _, hf := range supportedHashFuncs {
+ if slices.Contains(hfs, hf) {
+ chosenHashFuncs = append(chosenHashFuncs, hf)
+ break
+ }
+ }
+ }
+
+ if len(chosenHashFuncs) == 0 {
+ return nil, errors.New("no supported digest algorithms found")
+ }
+
+ return chosenHashFuncs, nil
+}
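+
+// A worked example of the selection above: given one subject carrying only a
+// sha256 digest and another carrying sha384 and sha512 digests, the first
+// subject forces sha256 into the chosen set; the second shares nothing with
+// {sha256}, so its strongest supported match (sha512) is added, and the
+// result is [SHA256, SHA512].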
+
+func intersection(a, b []crypto.Hash) []crypto.Hash {
+ var result []crypto.Hash
+ for _, x := range a {
+ if slices.Contains(b, x) {
+ result = append(result, x)
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go
new file mode 100644
index 00000000000..5751ec852cb
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/signed_entity.go
@@ -0,0 +1,882 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ in_toto "github.com/in-toto/attestation/go/v1"
+ "github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
+ "github.com/sigstore/sigstore-go/pkg/root"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+const (
+ VerificationResultMediaType01 = "application/vnd.dev.sigstore.verificationresult+json;version=0.1"
+)
+
+type Verifier struct {
+ trustedMaterial root.TrustedMaterial
+ config VerifierConfig
+}
+
+type VerifierConfig struct { // nolint: revive
+ // requireSignedTimestamps requires RFC3161 timestamps to verify
+ // short-lived certificates
+ requireSignedTimestamps bool
+ // signedTimestampThreshold is the minimum number of verified
+ // RFC3161 timestamps in a bundle
+ signedTimestampThreshold int
+ // requireIntegratedTimestamps requires log entry integrated timestamps to
+ // verify short-lived certificates
+ requireIntegratedTimestamps bool
+ // integratedTimeThreshold is the minimum number of log entry
+ // integrated timestamps in a bundle
+ integratedTimeThreshold int
+ // requireObserverTimestamps requires RFC3161 timestamps and/or log
+ // integrated timestamps to verify short-lived certificates
+ requireObserverTimestamps bool
+ // observerTimestampThreshold is the minimum number of verified
+ // RFC3161 timestamps and/or log integrated timestamps in a bundle
+ observerTimestampThreshold int
+ // requireTlogEntries requires log inclusion proofs in a bundle
+ requireTlogEntries bool
+ // tlogEntriesThreshold is the minimum number of verified inclusion
+ // proofs in a bundle
+ tlogEntriesThreshold int
+ // requireSCTs requires SCTs in Fulcio certificates
+ requireSCTs bool
+ // ctlogEntriesThreshold is the minimum number of verified SCTs in
+ // a Fulcio certificate
+ ctlogEntriesThreshold int
+ // useCurrentTime uses the current time rather than a provided signed
+ // or log timestamp. Most workflows will not use this option
+ useCurrentTime bool
+ // allowNoTimestamp can be used to skip timestamp checks when a key
+ // is used rather than a certificate.
+ allowNoTimestamp bool
+}
+
+type VerifierOption func(*VerifierConfig) error
+
+// NewVerifier creates a new Verifier. It takes a
+// root.TrustedMaterial, which contains a set of trusted public keys and
+// certificates, and a set of VerifierOptions, which set the config
+// that determines the behaviour of the Verify function.
+//
+// VerifierConfig's set of options should match the properties of a given
+// Sigstore deployment, i.e. whether to expect SCTs, Tlog entries, or signed
+// timestamps.
+func NewVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*Verifier, error) {
+ var err error
+ c := VerifierConfig{}
+
+ for _, opt := range options {
+ err = opt(&c)
+ if err != nil {
+ return nil, fmt.Errorf("failed to configure verifier: %w", err)
+ }
+ }
+
+ err = c.Validate()
+ if err != nil {
+ return nil, err
+ }
+
+ v := &Verifier{
+ trustedMaterial: trustedMaterial,
+ config: c,
+ }
+
+ return v, nil
+}
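+
+// A minimal configuration sketch for a deployment that expects transparency
+// log entries, SCTs, and observer timestamps; trustedMaterial is assumed to
+// be supplied by the caller, and the thresholds of 1 are assumptions to keep
+// the example small:
+//
+//	v, err := NewVerifier(trustedMaterial,
+//		WithObserverTimestamps(1),
+//		WithTransparencyLog(1),
+//		WithSignedCertificateTimestamps(1),
+//	)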
+
+// TODO: Remove the following deprecated functions in a future release before sigstore-go 2.0.
+
+// Deprecated: Use Verifier instead
+type SignedEntityVerifier = Verifier
+
+// Deprecated: Use NewVerifier instead
+func NewSignedEntityVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*Verifier, error) {
+ return NewVerifier(trustedMaterial, options...)
+}
+
+// WithSignedTimestamps configures the Verifier to expect RFC 3161
+// timestamps from a Timestamp Authority, verify them using the TrustedMaterial's
+// TimestampingAuthorities(), and, if present, use the resulting timestamp(s)
+// to verify the Fulcio certificate.
+func WithSignedTimestamps(threshold int) VerifierOption {
+ return func(c *VerifierConfig) error {
+ if threshold < 1 {
+ return errors.New("signed timestamp threshold must be at least 1")
+ }
+ c.requireSignedTimestamps = true
+ c.signedTimestampThreshold = threshold
+ return nil
+ }
+}
+
+// WithObserverTimestamps configures the Verifier to expect
+// timestamps from either an RFC3161 timestamp authority or a log's
+// SignedEntryTimestamp. These are verified using the TrustedMaterial's
+// TimestampingAuthorities() or RekorLogs(), and used to verify
+// the Fulcio certificate.
+func WithObserverTimestamps(threshold int) VerifierOption {
+ return func(c *VerifierConfig) error {
+ if threshold < 1 {
+ return errors.New("observer timestamp threshold must be at least 1")
+ }
+ c.requireObserverTimestamps = true
+ c.observerTimestampThreshold = threshold
+ return nil
+ }
+}
+
+// WithTransparencyLog configures the Verifier to expect
+// Transparency Log inclusion proofs or SignedEntryTimestamps, verifying them
+// using the TrustedMaterial's RekorLogs().
+func WithTransparencyLog(threshold int) VerifierOption {
+ return func(c *VerifierConfig) error {
+ if threshold < 1 {
+ return errors.New("transparency log entry threshold must be at least 1")
+ }
+ c.requireTlogEntries = true
+ c.tlogEntriesThreshold = threshold
+ return nil
+ }
+}
+
+// WithIntegratedTimestamps configures the Verifier to
+// expect log entry integrated timestamps from either SignedEntryTimestamps
+// or live log lookups.
+func WithIntegratedTimestamps(threshold int) VerifierOption {
+ return func(c *VerifierConfig) error {
+ c.requireIntegratedTimestamps = true
+ c.integratedTimeThreshold = threshold
+ return nil
+ }
+}
+
+// WithSignedCertificateTimestamps configures the Verifier to
+// expect the Fulcio certificate to have a SignedCertificateTimestamp, and
+// verify it using the TrustedMaterial's CTLogAuthorities().
+func WithSignedCertificateTimestamps(threshold int) VerifierOption {
+ return func(c *VerifierConfig) error {
+ if threshold < 1 {
+ return errors.New("ctlog entry threshold must be at least 1")
+ }
+ c.requireSCTs = true
+ c.ctlogEntriesThreshold = threshold
+ return nil
+ }
+}
+
+// WithCurrentTime configures the Verifier to not expect
+// any timestamps from either a Timestamp Authority or a Transparency Log.
+// This option should not be enabled when verifying short-lived certificates,
+// as an observer timestamp is needed. This option is useful primarily for
+// private deployments with long-lived code signing certificates.
+func WithCurrentTime() VerifierOption {
+ return func(c *VerifierConfig) error {
+ c.useCurrentTime = true
+ return nil
+ }
+}
+
+// WithNoObserverTimestamps configures the Verifier to not expect
+// any timestamps from either a Timestamp Authority or a Transparency Log
+// and to not use the current time to verify a certificate. This may only
+// be used when verifying with keys rather than certificates.
+func WithNoObserverTimestamps() VerifierOption {
+ return func(c *VerifierConfig) error {
+ c.allowNoTimestamp = true
+ return nil
+ }
+}
+
+func (c *VerifierConfig) Validate() error {
+ if c.allowNoTimestamp && (c.requireObserverTimestamps || c.requireSignedTimestamps || c.requireIntegratedTimestamps || c.useCurrentTime) {
+ return errors.New("specify WithNoObserverTimestamps() without any other verifier options")
+ }
+ if !c.requireObserverTimestamps && !c.requireSignedTimestamps && !c.requireIntegratedTimestamps && !c.useCurrentTime && !c.allowNoTimestamp {
+ return errors.New("when initializing a new Verifier, you must specify at least one of " +
+ "WithObserverTimestamps(), WithSignedTimestamps(), WithIntegratedTimestamps() or WithCurrentTime(), " +
+ "or exclusively specify WithNoObserverTimestamps()")
+ }
+
+ return nil
+}
+
+type VerificationResult struct {
+ MediaType string `json:"mediaType"`
+ Statement *in_toto.Statement `json:"statement,omitempty"`
+ Signature *SignatureVerificationResult `json:"signature,omitempty"`
+ VerifiedTimestamps []TimestampVerificationResult `json:"verifiedTimestamps"`
+ VerifiedIdentity *CertificateIdentity `json:"verifiedIdentity,omitempty"`
+}
+
+type SignatureVerificationResult struct {
+ PublicKeyID *[]byte `json:"publicKeyId,omitempty"`
+ Certificate *certificate.Summary `json:"certificate,omitempty"`
+}
+
+type TimestampVerificationResult struct {
+ Type string `json:"type"`
+ URI string `json:"uri"`
+ Timestamp time.Time `json:"timestamp"`
+}
+
+func NewVerificationResult() *VerificationResult {
+ return &VerificationResult{
+ MediaType: VerificationResultMediaType01,
+ }
+}
+
+// MarshalJSON deals with protojson needed for the Statement.
+// Can be removed when https://github.com/in-toto/attestation/pull/403 is merged.
+func (b *VerificationResult) MarshalJSON() ([]byte, error) {
+ statement, err := protojson.Marshal(b.Statement)
+ if err != nil {
+ return nil, err
+ }
+ // creating a type alias to avoid infinite recursion, as MarshalJSON is
+ // not copied into the alias.
+ type Alias VerificationResult
+ return json.Marshal(struct {
+ Alias
+ Statement json.RawMessage `json:"statement,omitempty"`
+ }{
+ Alias: Alias(*b),
+ Statement: statement,
+ })
+}
+
+func (b *VerificationResult) UnmarshalJSON(data []byte) error {
+ b.Statement = &in_toto.Statement{}
+ type Alias VerificationResult
+ aux := &struct {
+ Alias
+ Statement json.RawMessage `json:"statement,omitempty"`
+ }{
+ Alias: Alias(*b),
+ }
+ if err := json.Unmarshal(data, aux); err != nil {
+ return err
+ }
+ return protojson.Unmarshal(aux.Statement, b.Statement)
+}
+
+type PolicyOption func(*PolicyConfig) error
+type ArtifactPolicyOption func(*PolicyConfig) error
+
+// PolicyBuilder is responsible for building & validating a PolicyConfig
+type PolicyBuilder struct {
+ artifactPolicy ArtifactPolicyOption
+ policyOptions []PolicyOption
+}
+
+func (pc PolicyBuilder) options() []PolicyOption {
+ arr := []PolicyOption{PolicyOption(pc.artifactPolicy)}
+ return append(arr, pc.policyOptions...)
+}
+
+func (pc PolicyBuilder) BuildConfig() (*PolicyConfig, error) {
+ var err error
+
+ policy := &PolicyConfig{}
+ for _, applyOption := range pc.options() {
+ err = applyOption(policy)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err := policy.validate(); err != nil {
+ return nil, err
+ }
+
+ return policy, nil
+}
+
+type ArtifactDigest struct {
+ Algorithm string
+ Digest []byte
+}
+
+type PolicyConfig struct {
+ ignoreArtifact bool
+ ignoreIdentities bool
+ requireSigningKey bool
+ certificateIdentities CertificateIdentities
+ verifyArtifacts bool
+ artifacts []io.Reader
+ verifyArtifactDigests bool
+ artifactDigests []ArtifactDigest
+}
+
+func (p *PolicyConfig) withVerifyAlreadyConfigured() error {
+ if p.verifyArtifacts || p.verifyArtifactDigests {
+ return errors.New("only one invocation of WithArtifact/WithArtifacts/WithArtifactDigest/WithArtifactDigests is allowed")
+ }
+
+ return nil
+}
+
+func (p *PolicyConfig) validate() error {
+ if p.RequireIdentities() && len(p.certificateIdentities) == 0 {
+ return errors.New("can't verify identities without providing at least one identity")
+ }
+
+ return nil
+}
+
+// RequireArtifact returns true if the Verify algorithm should perform
+// signature verification with an artifact provided by either the
+// WithArtifact or the WithArtifactDigest functions.
+//
+// By default, unless explicitly turned off, we should always expect to verify
+// a SignedEntity's signature using an artifact. Bools are initialized to false,
+// so this behaviour is therefore controlled by the ignoreArtifact field.
+//
+// Double negatives are confusing, though. To aid with comprehension of the
+// main Verify loop, this function therefore just wraps the double negative.
+func (p *PolicyConfig) RequireArtifact() bool {
+ return !p.ignoreArtifact
+}
+
+// RequireIdentities returns true if the Verify algorithm should check
+// whether the SignedEntity's certificate was created by one of the identities
+// provided by the WithCertificateIdentity function.
+//
+// By default, unless explicitly turned off, we should always expect to enforce
+// that a SignedEntity's certificate was created by an Identity we trust. Bools
+// are initialized to false, so this behaviour is therefore controlled by the
+// ignoreIdentities field.
+//
+// Double negatives are confusing, though. To aid with comprehension of the
+// main Verify loop, this function therefore just wraps the double negative.
+func (p *PolicyConfig) RequireIdentities() bool {
+ return !p.ignoreIdentities
+}
+
+// RequireSigningKey returns true if we expect the SignedEntity to be signed
+// with a key and not a certificate.
+func (p *PolicyConfig) RequireSigningKey() bool {
+ return p.requireSigningKey
+}
+
+func NewPolicy(artifactOpt ArtifactPolicyOption, options ...PolicyOption) PolicyBuilder {
+ return PolicyBuilder{artifactPolicy: artifactOpt, policyOptions: options}
+}
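+
+// A minimal sketch of assembling a policy: exactly one artifact option is
+// paired with any number of policy options (artifact, certID, v, and entity
+// are assumed to be supplied by the caller):
+//
+//	policy := NewPolicy(
+//		WithArtifact(artifact),
+//		WithCertificateIdentity(certID),
+//	)
+//	result, err := v.Verify(entity, policy)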
+
+// WithoutIdentitiesUnsafe allows the caller of Verify to skip enforcing any
+// checks on the identity that created the SignedEntity being verified.
+//
+// Do not use this option unless you know what you are doing!
+//
+// As the name implies, using WithoutIdentitiesUnsafe is not safe: outside of
+// exceptional circumstances, we should always enforce that the SignedEntity
+// being verified was signed by a trusted CertificateIdentity.
+//
+// For more information, consult WithCertificateIdentity.
+func WithoutIdentitiesUnsafe() PolicyOption {
+ return func(p *PolicyConfig) error {
+ if len(p.certificateIdentities) > 0 {
+ return errors.New("can't use WithoutIdentitiesUnsafe while specifying CertificateIdentities")
+ }
+
+ p.ignoreIdentities = true
+ return nil
+ }
+}
+
+// WithCertificateIdentity allows the caller of Verify to enforce that the
+// SignedEntity being verified was signed by the given identity, as defined by
+// the Fulcio certificate embedded in the entity. If this policy is enabled,
+// but the SignedEntity does not have a certificate, verification will fail.
+//
+// Providing this function multiple times will concatenate the provided
+// CertificateIdentity to the list of identities being checked.
+//
+// If all of the provided CertificateIdentities fail to match the Fulcio
+// certificate, then verification will fail. If *any* CertificateIdentity
+// matches, then verification will succeed. Therefore, each CertificateIdentity
+// provided to this function must define a "sufficient" identity to trust.
+//
+// The CertificateIdentity struct allows callers to specify:
+// - The exact value, or Regexp, of the SubjectAlternativeName
+// - The exact value of any Fulcio OID X.509 extension, i.e. Issuer
+//
+// For convenience, consult the NewShortCertificateIdentity function.
+func WithCertificateIdentity(identity CertificateIdentity) PolicyOption {
+ return func(p *PolicyConfig) error {
+ if p.ignoreIdentities {
+ return errors.New("can't use WithCertificateIdentity while using WithoutIdentitiesUnsafe")
+ }
+ if p.requireSigningKey {
+ return errors.New("can't use WithCertificateIdentity while using WithKey")
+ }
+
+ p.certificateIdentities = append(p.certificateIdentities, identity)
+ return nil
+ }
+}
+
+// WithKey allows the caller of Verify to require the SignedEntity being
+// verified was signed with a key and not a certificate.
+func WithKey() PolicyOption {
+ return func(p *PolicyConfig) error {
+ if len(p.certificateIdentities) > 0 {
+ return errors.New("can't use WithKey while using WithCertificateIdentity")
+ }
+
+ p.requireSigningKey = true
+ p.ignoreIdentities = true
+ return nil
+ }
+}
+
+// WithoutArtifactUnsafe allows the caller of Verify to skip checking whether
+// the SignedEntity was created from, or references, an artifact.
+//
+// WithoutArtifactUnsafe can only be used with SignedEntities that contain a
+// DSSE envelope. If the SignedEntity has a MessageSignature, providing
+// this policy option will cause verification to always fail, since
+// MessageSignatures can only be verified in the presence of an Artifact or
+// artifact digest. See WithArtifact/WithArtifactDigest for more information.
+//
+// Do not use this function unless you know what you are doing!
+//
+// As the name implies, using WithoutArtifactUnsafe is not safe: outside of
+// exceptional circumstances, SignedEntities should always be verified with
+// an artifact.
+func WithoutArtifactUnsafe() ArtifactPolicyOption {
+ return func(p *PolicyConfig) error {
+ if err := p.withVerifyAlreadyConfigured(); err != nil {
+ return err
+ }
+
+ p.ignoreArtifact = true
+ return nil
+ }
+}
+
+// WithArtifact allows the caller of Verify to enforce that the SignedEntity
+// being verified was created from, or references, a given artifact.
+//
+// If the SignedEntity contains a DSSE envelope, then the artifact digest is
+// calculated from the given artifact, and compared to the digest in the
+// envelope's statement.
+func WithArtifact(artifact io.Reader) ArtifactPolicyOption {
+ return func(p *PolicyConfig) error {
+ if err := p.withVerifyAlreadyConfigured(); err != nil {
+ return err
+ }
+
+ if p.ignoreArtifact {
+ return errors.New("can't use WithArtifact while using WithoutArtifactUnsafe")
+ }
+
+ p.verifyArtifacts = true
+ p.artifacts = []io.Reader{artifact}
+ return nil
+ }
+}
+
+// WithArtifacts allows the caller of Verify to enforce that the SignedEntity
+// being verified was created from, or references, a slice of artifacts.
+//
+// If the SignedEntity contains a DSSE envelope, then artifact digests are
+// calculated from the given artifacts and compared to the digests in the
+// envelope's statement.
+func WithArtifacts(artifacts []io.Reader) ArtifactPolicyOption {
+ return func(p *PolicyConfig) error {
+ if err := p.withVerifyAlreadyConfigured(); err != nil {
+ return err
+ }
+
+ if p.ignoreArtifact {
+ return errors.New("can't use WithArtifacts while using WithoutArtifactUnsafe")
+ }
+
+ p.verifyArtifacts = true
+ p.artifacts = artifacts
+ return nil
+ }
+}
+
+// WithArtifactDigest allows the caller of Verify to enforce that the
+// SignedEntity being verified was created for a given artifact digest.
+//
+// If the SignedEntity contains a MessageSignature that was signed using the
+// ED25519 algorithm, then providing only an artifactDigest will fail; the
+// whole artifact must be provided. Use WithArtifact instead.
+//
+// If the SignedEntity contains a DSSE envelope, then the artifact digest is
+// compared to the digest in the envelope's statement.
+func WithArtifactDigest(algorithm string, artifactDigest []byte) ArtifactPolicyOption {
+ return func(p *PolicyConfig) error {
+ if err := p.withVerifyAlreadyConfigured(); err != nil {
+ return err
+ }
+
+ if p.ignoreArtifact {
+ return errors.New("can't use WithArtifactDigest while using WithoutArtifactUnsafe")
+ }
+
+ p.verifyArtifactDigests = true
+ p.artifactDigests = []ArtifactDigest{{
+ Algorithm: algorithm,
+ Digest: artifactDigest,
+ }}
+ return nil
+ }
+}
+
+// WithArtifactDigests allows the caller of Verify to enforce that the
+// SignedEntity being verified was created for a given array of artifact digests.
+//
+// If the SignedEntity contains a DSSE envelope, then the artifact digests
+// are compared to the digests in the envelope's statement.
+//
+// If the SignedEntity does not contain a DSSE envelope, verification fails.
+func WithArtifactDigests(digests []ArtifactDigest) ArtifactPolicyOption {
+ return func(p *PolicyConfig) error {
+ if err := p.withVerifyAlreadyConfigured(); err != nil {
+ return err
+ }
+
+ if p.ignoreArtifact {
+ return errors.New("can't use WithArtifactDigests while using WithoutArtifactUnsafe")
+ }
+
+ p.verifyArtifactDigests = true
+ p.artifactDigests = digests
+ return nil
+ }
+}
+
+// Verify checks the cryptographic integrity of a given SignedEntity according
+// to the options configured in the NewVerifier. Its purpose is to
+// determine whether the SignedEntity was created by a Sigstore deployment we
+// trust, as defined by keys in our TrustedMaterial.
+//
+// If the SignedEntity contains a MessageSignature, then the artifact or its
+// digest must be provided to the Verify function, as it is required to verify
+// the signature. See WithArtifact and WithArtifactDigest for more details.
+//
+// If and only if verification is successful, Verify will return a
+// VerificationResult struct whose contents' integrity have been verified.
+// Verify then checks the contents of that VerificationResult against the
+// supplied PolicyOptions. See WithCertificateIdentity for more details.
+//
+// Callers of this function SHOULD ALWAYS:
+// - (if the signed entity has a certificate) verify that its Subject Alternative
+// Name matches a trusted identity, and that its OID Issuer field matches an
+// expected value
+// - (if the signed entity has a dsse envelope) verify that the envelope's
+// statement's subject matches the artifact being verified
+func (v *Verifier) Verify(entity SignedEntity, pb PolicyBuilder) (*VerificationResult, error) {
+ policy, err := pb.BuildConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to build policy: %w", err)
+ }
+
+ // Let's go by the spec: https://docs.google.com/document/d/1kbhK2qyPPk8SLavHzYSDM8-Ueul9_oxIMVFuWMWKz0E/edit#heading=h.g11ovq2s1jxh
+ // > ## Transparency Log Entry
+ verifiedTlogTimestamps, err := v.VerifyTransparencyLogInclusion(entity)
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify log inclusion: %w", err)
+ }
+
+ // > ## Establishing a Time for the Signature
+ // > First, establish a time for the signature. This timestamp is required to validate the certificate chain, so this step comes first.
+ verifiedTimestamps, err := v.VerifyObserverTimestamps(entity, verifiedTlogTimestamps)
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify timestamps: %w", err)
+ }
+
+ verificationContent, err := entity.VerificationContent()
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch verification content: %w", err)
+ }
+
+ var signedWithCertificate bool
+ var certSummary certificate.Summary
+
+ // If the bundle was signed with a long-lived key, and does not have a Fulcio certificate,
+ // then skip the certificate verification steps
+ if leafCert := verificationContent.Certificate(); leafCert != nil {
+ if policy.RequireSigningKey() {
+ return nil, errors.New("expected key signature, not certificate")
+ }
+ if v.config.allowNoTimestamp {
+ return nil, errors.New("must provide timestamp to verify certificate")
+ }
+
+ signedWithCertificate = true
+
+ // Get the summary before modifying the cert extensions
+ certSummary, err = certificate.SummarizeCertificate(leafCert)
+ if err != nil {
+ return nil, fmt.Errorf("failed to summarize certificate: %w", err)
+ }
+
+ // From spec:
+ // > ## Certificate
+ // > …
+ // > The Verifier MUST perform certification path validation (RFC 5280 §6) of the certificate chain with the pre-distributed Fulcio root certificate(s) as a trust anchor, but with a fake “current time.” If a timestamp from the timestamping service is available, the Verifier MUST perform path validation using the timestamp from the Timestamping Service. If a timestamp from the Transparency Service is available, the Verifier MUST perform path validation using the timestamp from the Transparency Service. If both are available, the Verifier performs path validation twice. If either fails, verification fails.
+
+ // Go does not support the OtherName GeneralName SAN extension. If
+ // Fulcio issued the certificate with an OtherName SAN, it will be
+ // handled by SummarizeCertificate above, and it must be removed here
+ // or the X.509 verification will fail.
+ if len(leafCert.UnhandledCriticalExtensions) > 0 {
+ var unhandledExts []asn1.ObjectIdentifier
+ for _, oid := range leafCert.UnhandledCriticalExtensions {
+ if !oid.Equal(cryptoutils.SANOID) {
+ unhandledExts = append(unhandledExts, oid)
+ }
+ }
+ leafCert.UnhandledCriticalExtensions = unhandledExts
+ }
+
+ var chains [][]*x509.Certificate
+ for _, verifiedTs := range verifiedTimestamps {
+ // verify the leaf certificate against the root
+ chains, err = VerifyLeafCertificate(verifiedTs.Timestamp, leafCert, v.trustedMaterial)
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify leaf certificate: %w", err)
+ }
+ }
+
+ // From spec:
+ // > Unless performing online verification (see §Alternative Workflows), the Verifier MUST extract the SignedCertificateTimestamp embedded in the leaf certificate, and verify it as in RFC 9162 §8.1.3, using the verification key from the Certificate Transparency Log.
+
+ if v.config.requireSCTs {
+ err = VerifySignedCertificateTimestamp(chains, v.config.ctlogEntriesThreshold, v.trustedMaterial)
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify signed certificate timestamp: %w", err)
+ }
+ }
+ }
+
+	// If SCTs are required, ensure the bundle is certificate-signed, not public-key-signed
+ if v.config.requireSCTs {
+ if verificationContent.PublicKey() != nil {
+ return nil, errors.New("SCTs required but bundle is signed with a public key, which cannot contain SCTs")
+ }
+ }
+
+ // From spec:
+ // > ## Signature Verification
+ // > The Verifier MUST verify the provided signature for the constructed payload against the key in the leaf of the certificate chain.
+
+ sigContent, err := entity.SignatureContent()
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch signature content: %w", err)
+ }
+
+ entityVersion, err := entity.Version()
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch entity version: %w", err)
+ }
+
+	var enableCompat bool
+	switch entityVersion {
+	case "v0.1", "0.1", "v0.2", "0.2", "v0.3", "0.3":
+		enableCompat = true
+	}
+ verifier, err := getSignatureVerifier(sigContent, verificationContent, v.trustedMaterial, enableCompat)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get signature verifier: %w", err)
+ }
+
+ if policy.RequireArtifact() {
+ switch {
+ case policy.verifyArtifacts:
+ err = verifySignatureWithVerifierAndArtifacts(verifier, sigContent, verificationContent, v.trustedMaterial, policy.artifacts)
+ case policy.verifyArtifactDigests:
+ err = verifySignatureWithVerifierAndArtifactDigests(verifier, sigContent, verificationContent, v.trustedMaterial, policy.artifactDigests)
+ default:
+ // should never happen, but just in case:
+ err = errors.New("no artifact or artifact digest provided")
+ }
+ } else {
+		// verifying with the artifact has been explicitly turned off, so just check
+		// the signature on the DSSE envelope:
+ err = verifySignatureWithVerifier(verifier, sigContent, verificationContent, v.trustedMaterial)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify signature: %w", err)
+ }
+
+ // Hooray! We've verified all of the entity's constituent parts! 🎉 🥳
+ // Now we can construct the results object accordingly.
+ result := NewVerificationResult()
+ if signedWithCertificate {
+ result.Signature = &SignatureVerificationResult{
+ Certificate: &certSummary,
+ }
+ } else {
+ pubKeyID := []byte(verificationContent.PublicKey().Hint())
+ result.Signature = &SignatureVerificationResult{
+ PublicKeyID: &pubKeyID,
+ }
+ }
+
+ // SignatureContent can be either an Envelope or a MessageSignature.
+ // If it's an Envelope, let's pop the Statement for our results:
+ if envelope := sigContent.EnvelopeContent(); envelope != nil {
+ stmt, err := envelope.Statement()
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch envelope statement: %w", err)
+ }
+
+ result.Statement = stmt
+ }
+
+ result.VerifiedTimestamps = verifiedTimestamps
+
+ // Now that the signed entity's crypto material has been verified, and the
+ // result struct has been constructed, we can optionally enforce some
+ // additional policies:
+ // --------------------
+
+ // From ## Certificate section,
+ // >The Verifier MUST then check the certificate against the verification policy. Details on how to do this depend on the verification policy, but the Verifier SHOULD check the Issuer X.509 extension (OID 1.3.6.1.4.1.57264.1.1) at a minimum, and will in most cases check the SubjectAlternativeName as well. See Spec: Fulcio §TODO for example checks on the certificate.
+ if policy.RequireIdentities() {
+ if !signedWithCertificate {
+ // We got asked to verify identities, but the entity was not signed with
+ // a certificate. That's a problem!
+ return nil, errors.New("can't verify certificate identities: entity was not signed with a certificate")
+ }
+
+ if len(policy.certificateIdentities) == 0 {
+ return nil, errors.New("can't verify certificate identities: no identities provided")
+ }
+
+ matchingCertID, err := policy.certificateIdentities.Verify(certSummary)
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify certificate identity: %w", err)
+ }
+
+ result.VerifiedIdentity = matchingCertID
+ }
+
+ return result, nil
+}
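+
+// exampleVerifyWithDigest is an illustrative sketch (added for documentation;
+// it is not called anywhere) of the flow described above: build a policy from
+// an artifact digest plus a certificate identity, then hand the SignedEntity
+// to Verify. NewPolicy, CertificateIdentity and WithCertificateIdentity
+// follow upstream sigstore-go naming and are assumptions here.
+func exampleVerifyWithDigest(v *Verifier, entity SignedEntity, digest []byte, id CertificateIdentity) (*VerificationResult, error) {
+	pb := NewPolicy(WithArtifactDigest("sha256", digest), WithCertificateIdentity(id))
+	return v.Verify(entity, pb)
+}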
+
+// VerifyTransparencyLogInclusion verifies the entity's tlog entries, if the
+// verifier is configured to expect them. When verifying with integrated or
+// observer timestamps, it also returns the verified log integrated timestamps.
+// TODO: Return a different verification result for logs specifically (also for #48)
+func (v *Verifier) VerifyTransparencyLogInclusion(entity SignedEntity) ([]TimestampVerificationResult, error) {
+ verifiedTimestamps := []TimestampVerificationResult{}
+
+ if v.config.requireTlogEntries {
+		// log timestamps should be verified if WithIntegratedTimestamps or WithObserverTimestamps is used
+ verifiedTlogTimestamps, err := VerifyTlogEntry(entity, v.trustedMaterial, v.config.tlogEntriesThreshold,
+ v.config.requireIntegratedTimestamps || v.config.requireObserverTimestamps)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, vts := range verifiedTlogTimestamps {
+ verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "Tlog", URI: vts.URI, Timestamp: vts.Time})
+ }
+ }
+
+ return verifiedTimestamps, nil
+}
+
+// VerifyObserverTimestamps verifies RFC3161 signed timestamps, and verifies
+// that timestamp thresholds are met with log entry integrated timestamps,
+// signed timestamps, or a combination of both. The returned timestamps
+// can be used to verify short-lived certificates.
+// logTimestamps may be populated with verified log entry integrated timestamps.
+// In order to be verifiable, a SignedEntity must have at least one verified
+// "observer timestamp".
+func (v *Verifier) VerifyObserverTimestamps(entity SignedEntity, logTimestamps []TimestampVerificationResult) ([]TimestampVerificationResult, error) {
+ verifiedTimestamps := []TimestampVerificationResult{}
+
+ // From spec:
+ // > … if verification or timestamp parsing fails, the Verifier MUST abort
+ if v.config.requireSignedTimestamps {
+ verifiedSignedTimestamps, err := VerifySignedTimestampWithThreshold(entity, v.trustedMaterial, v.config.signedTimestampThreshold)
+ if err != nil {
+ return nil, err
+ }
+ for _, vts := range verifiedSignedTimestamps {
+ verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time})
+ }
+ }
+
+ if v.config.requireIntegratedTimestamps {
+ if len(logTimestamps) < v.config.integratedTimeThreshold {
+ return nil, fmt.Errorf("threshold not met for verified log entry integrated timestamps: %d < %d", len(logTimestamps), v.config.integratedTimeThreshold)
+ }
+ verifiedTimestamps = append(verifiedTimestamps, logTimestamps...)
+ }
+
+ if v.config.requireObserverTimestamps {
+ verifiedSignedTimestamps, verificationErrors, err := VerifySignedTimestamp(entity, v.trustedMaterial)
+ if err != nil {
+ return nil, fmt.Errorf("failed to verify signed timestamps: %w", err)
+ }
+
+ // check threshold for both RFC3161 and log timestamps
+ tsCount := len(verifiedSignedTimestamps) + len(logTimestamps)
+ if tsCount < v.config.observerTimestampThreshold {
+ return nil, fmt.Errorf("threshold not met for verified signed & log entry integrated timestamps: %d < %d; error: %w",
+ tsCount, v.config.observerTimestampThreshold, errors.Join(verificationErrors...))
+ }
+
+ // append all timestamps
+ verifiedTimestamps = append(verifiedTimestamps, logTimestamps...)
+ for _, vts := range verifiedSignedTimestamps {
+ verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time})
+ }
+ }
+
+ if v.config.useCurrentTime {
+ // use current time to verify certificate if no signed timestamps are provided
+ verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "CurrentTime", URI: "", Timestamp: time.Now()})
+ }
+
+ if len(verifiedTimestamps) == 0 && !v.config.allowNoTimestamp {
+ return nil, fmt.Errorf("no valid observer timestamps found")
+ }
+
+ return verifiedTimestamps, nil
+}
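+
+// Worked example of the observer-timestamp threshold above (illustrative,
+// assuming one verified RFC 3161 timestamp and one verified log entry
+// integrated timestamp): an observerTimestampThreshold of 2 is satisfied,
+// since 1 + 1 >= 2, while a signedTimestampThreshold of 2 would not be,
+// because only one RFC 3161 timestamp was verified.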
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go
new file mode 100644
index 00000000000..eff84be07e1
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tlog.go
@@ -0,0 +1,187 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/sigstore/sigstore-go/pkg/root"
+ "github.com/sigstore/sigstore-go/pkg/tlog"
+ "github.com/sigstore/sigstore/pkg/signature"
+)
+
+const maxAllowedTlogEntries = 32
+
+// VerifyTlogEntry verifies that the given entity has been logged
+// in the transparency log and that the log entry is valid.
+//
+// The logThreshold parameter is the number of unique transparency log entries
+// that must be verified.
+func VerifyTlogEntry(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime bool) ([]root.Timestamp, error) { //nolint:revive
+ entries, err := entity.TlogEntries()
+ if err != nil {
+ return nil, err
+ }
+
+ // limit the number of tlog entries to prevent DoS
+ if len(entries) > maxAllowedTlogEntries {
+ return nil, fmt.Errorf("too many tlog entries: %d > %d", len(entries), maxAllowedTlogEntries)
+ }
+
+ // disallow duplicate entries, as a malicious actor could use duplicates to bypass the threshold
+ for i := 0; i < len(entries); i++ {
+ for j := i + 1; j < len(entries); j++ {
+ if entries[i].LogKeyID() == entries[j].LogKeyID() && entries[i].LogIndex() == entries[j].LogIndex() {
+ return nil, errors.New("duplicate tlog entries found")
+ }
+ }
+ }
+
+ sigContent, err := entity.SignatureContent()
+ if err != nil {
+ return nil, err
+ }
+
+ entitySignature := sigContent.Signature()
+
+ verificationContent, err := entity.VerificationContent()
+ if err != nil {
+ return nil, err
+ }
+
+ verifiedTimestamps := []root.Timestamp{}
+ logEntriesVerified := 0
+
+ for _, entry := range entries {
+ err := tlog.ValidateEntry(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ rekorLogs := trustedMaterial.RekorLogs()
+ keyID := entry.LogKeyID()
+ hex64Key := hex.EncodeToString([]byte(keyID))
+		tlogVerifier, ok := rekorLogs[hex64Key]
+ if !ok {
+ // skip entries the trust root cannot verify
+ continue
+ }
+
+ if !entry.HasInclusionPromise() && !entry.HasInclusionProof() {
+ return nil, fmt.Errorf("entry must contain an inclusion proof and/or promise")
+ }
+ if entry.HasInclusionPromise() {
+ err = tlog.VerifySET(entry, rekorLogs)
+ if err != nil {
+ // skip entries the trust root cannot verify
+ continue
+ }
+ if trustIntegratedTime {
+ verifiedTimestamps = append(verifiedTimestamps, root.Timestamp{Time: entry.IntegratedTime(), URI: tlogVerifier.BaseURL})
+ }
+ }
+ if entry.HasInclusionProof() {
+ verifier, err := getVerifier(tlogVerifier.PublicKey, tlogVerifier.SignatureHashFunc)
+ if err != nil {
+ return nil, err
+ }
+
+			if hasRekorV1STH(entry) {
+				if err := tlog.VerifyInclusion(entry, *verifier); err != nil {
+					return nil, err
+				}
+			} else {
+				if tlogVerifier.BaseURL == "" {
+					return nil, fmt.Errorf("cannot verify Rekor v2 entry without baseUrl in transparency log's trusted root")
+				}
+				u, err := url.Parse(tlogVerifier.BaseURL)
+				if err != nil {
+					return nil, err
+				}
+				if err := tlog.VerifyCheckpointAndInclusion(entry, *verifier, u.Hostname()); err != nil {
+					return nil, err
+				}
+			}
+			// DO NOT use the timestamp when the entry has only an inclusion proof, because it is not signed metadata
+ }
+
+ // Ensure entry signature matches signature from bundle
+ if !bytes.Equal(entry.Signature(), entitySignature) {
+ return nil, errors.New("transparency log signature does not match")
+ }
+
+ // Ensure entry certificate matches bundle certificate
+ if !verificationContent.CompareKey(entry.PublicKey(), trustedMaterial) {
+ return nil, errors.New("transparency log certificate does not match")
+ }
+
+ // TODO: if you have access to artifact, check that it matches body subject
+
+ // Check tlog entry time against bundle certificates
+ if !entry.IntegratedTime().IsZero() {
+ if !verificationContent.ValidAtTime(entry.IntegratedTime(), trustedMaterial) {
+ return nil, errors.New("integrated time outside certificate validity")
+ }
+ }
+
+ // successful log entry verification
+ logEntriesVerified++
+ }
+
+ if logEntriesVerified < logThreshold {
+ return nil, fmt.Errorf("not enough verified log entries from transparency log: %d < %d", logEntriesVerified, logThreshold)
+ }
+
+ return verifiedTimestamps, nil
+}
+
+func getVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (*signature.Verifier, error) {
+ verifier, err := signature.LoadVerifier(publicKey, hashFunc)
+ if err != nil {
+ return nil, err
+ }
+
+ return &verifier, nil
+}
+
+// TODO: remove this deprecated function before 2.0
+
+// Deprecated: use VerifyTlogEntry instead
+func VerifyArtifactTransparencyLog(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime bool) ([]root.Timestamp, error) { //nolint:revive
+ return VerifyTlogEntry(entity, trustedMaterial, logThreshold, trustIntegratedTime)
+}
+
+var treeIDSuffixRegex = regexp.MustCompile(".* - [0-9]+$")
+
+// hasRekorV1STH reports whether the entry's checkpoint has a Rekor v1-style
+// Signed Tree Head, which contains a numeric Tree ID as part of its checkpoint origin.
+func hasRekorV1STH(entry *tlog.Entry) bool {
+ tle := entry.TransparencyLogEntry()
+ checkpointBody := tle.GetInclusionProof().GetCheckpoint().GetEnvelope()
+ checkpointLines := strings.Split(checkpointBody, "\n")
+ if len(checkpointLines) < 4 {
+ return false
+ }
+ return treeIDSuffixRegex.MatchString(checkpointLines[0])
+}
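+
+// For reference (illustrative, with a hypothetical tree ID): a Rekor v1
+// checkpoint origin line looks like "rekor.example.dev - 1193050959916656506",
+// which the regexp above matches, whereas Rekor v2 checkpoints use an origin
+// without the " - <treeID>" suffix and are therefore verified against the
+// log's hostname via VerifyCheckpointAndInclusion.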
diff --git a/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go
new file mode 100644
index 00000000000..1c10445dd98
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore-go/pkg/verify/tsa.go
@@ -0,0 +1,120 @@
+// Copyright 2023 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/sigstore/sigstore-go/pkg/root"
+)
+
+const maxAllowedTimestamps = 32
+
+// VerifySignedTimestamp verifies that the given entity has been timestamped
+// by a trusted timestamp authority and that the timestamp is valid.
+func VerifySignedTimestamp(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]*root.Timestamp, []error, error) { //nolint:revive
+ signedTimestamps, err := entity.Timestamps()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // limit the number of timestamps to prevent DoS
+ if len(signedTimestamps) > maxAllowedTimestamps {
+ return nil, nil, fmt.Errorf("too many signed timestamps: %d > %d", len(signedTimestamps), maxAllowedTimestamps)
+ }
+ sigContent, err := entity.SignatureContent()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ signatureBytes := sigContent.Signature()
+
+ verifiedTimestamps := []*root.Timestamp{}
+ var verificationErrors []error
+ for _, timestamp := range signedTimestamps {
+ verifiedSignedTimestamp, err := verifySignedTimestamp(timestamp, signatureBytes, trustedMaterial)
+ if err != nil {
+ verificationErrors = append(verificationErrors, err)
+ continue
+ }
+ if isDuplicateTSA(verifiedTimestamps, verifiedSignedTimestamp) {
+ verificationErrors = append(verificationErrors, fmt.Errorf("duplicate timestamps from the same authority, ignoring %s", verifiedSignedTimestamp.URI))
+ continue
+ }
+
+ verifiedTimestamps = append(verifiedTimestamps, verifiedSignedTimestamp)
+ }
+
+ return verifiedTimestamps, verificationErrors, nil
+}
+
+// isDuplicateTSA checks if the given verified signed timestamp is a duplicate
+// of any of the verified timestamps.
+// This is used to prevent replay attacks and ensure a single compromised TSA
+// cannot meet the threshold.
+func isDuplicateTSA(verifiedTimestamps []*root.Timestamp, verifiedSignedTimestamp *root.Timestamp) bool {
+ for _, ts := range verifiedTimestamps {
+ if ts.URI == verifiedSignedTimestamp.URI {
+ return true
+ }
+ }
+ return false
+}
+
+// VerifySignedTimestampWithThreshold verifies that the given entity has been timestamped
+// by a trusted timestamp authority and that the timestamp is valid.
+//
+// The threshold parameter is the number of unique timestamps that must be
+// verified.
+func VerifySignedTimestampWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]*root.Timestamp, error) { //nolint:revive
+ verifiedTimestamps, verificationErrors, err := VerifySignedTimestamp(entity, trustedMaterial)
+ if err != nil {
+ return nil, err
+ }
+ if len(verifiedTimestamps) < threshold {
+ return nil, fmt.Errorf("threshold not met for verified signed timestamps: %d < %d; error: %w", len(verifiedTimestamps), threshold, errors.Join(verificationErrors...))
+ }
+ return verifiedTimestamps, nil
+}
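+
+// exampleRequireTwoTSAs is an illustrative sketch (added for documentation;
+// it is not called anywhere): it demands that at least two distinct timestamp
+// authorities vouch for the entity's signature.
+func exampleRequireTwoTSAs(entity SignedEntity, tm root.TrustedMaterial) error {
+	if _, err := VerifySignedTimestampWithThreshold(entity, tm, 2); err != nil {
+		return fmt.Errorf("fewer than two verified timestamps: %w", err)
+	}
+	return nil
+}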
+
+func verifySignedTimestamp(signedTimestamp []byte, signatureBytes []byte, trustedMaterial root.TrustedMaterial) (*root.Timestamp, error) {
+ timestampAuthorities := trustedMaterial.TimestampingAuthorities()
+
+ var errs []error
+
+ // Iterate through TSA certificate authorities to find one that verifies
+ for _, tsa := range timestampAuthorities {
+ ts, err := tsa.Verify(signedTimestamp, signatureBytes)
+ if err == nil {
+ return ts, nil
+ }
+ errs = append(errs, err)
+ }
+
+ return nil, fmt.Errorf("unable to verify signed timestamps: %w", errors.Join(errs...))
+}
+
+// TODO: remove below deprecated functions before 2.0
+
+// Deprecated: use VerifySignedTimestamp instead.
+func VerifyTimestampAuthority(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]*root.Timestamp, []error, error) { //nolint:revive
+ return VerifySignedTimestamp(entity, trustedMaterial)
+}
+
+// Deprecated: use VerifySignedTimestampWithThreshold instead.
+func VerifyTimestampAuthorityWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]*root.Timestamp, error) { //nolint:revive
+ return VerifySignedTimestampWithThreshold(entity, trustedMaterial, threshold)
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go
new file mode 100644
index 00000000000..5f801095e3f
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go
@@ -0,0 +1,77 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dsse includes wrappers to support DSSE
+package dsse
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "errors"
+
+ "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/sigstore/sigstore/pkg/signature/options"
+)
+
+// SignerAdapter wraps a `sigstore/signature.Signer`, making it compatible with `go-securesystemslib/dsse.Signer`.
+type SignerAdapter struct {
+ SignatureSigner signature.Signer
+ Pub crypto.PublicKey
+ Opts []signature.SignOption
+ PubKeyID string
+}
+
+// Sign implements `go-securesystemslib/dsse.Signer`
+func (a *SignerAdapter) Sign(ctx context.Context, data []byte) ([]byte, error) {
+ return a.SignatureSigner.SignMessage(bytes.NewReader(data), append(a.Opts, options.WithContext(ctx))...)
+}
+
+// Verify is intentionally disabled for SignerAdapter; it exists only to satisfy `go-securesystemslib/dsse.Verifier`
+func (a *SignerAdapter) Verify(_ context.Context, _, _ []byte) error {
+ return errors.New("Verify disabled")
+}
+
+// Public implements `go-securesystemslib/dsse.Verifier`
+func (a *SignerAdapter) Public() crypto.PublicKey {
+ return a.Pub
+}
+
+// KeyID implements `go-securesystemslib/dsse.Verifier`
+func (a *SignerAdapter) KeyID() (string, error) {
+ return a.PubKeyID, nil
+}
+
+// VerifierAdapter wraps a `sigstore/signature.Verifier`, making it compatible with `go-securesystemslib/dsse.Verifier`.
+type VerifierAdapter struct {
+ SignatureVerifier signature.Verifier
+ Pub crypto.PublicKey
+ PubKeyID string
+}
+
+// Verify implements `go-securesystemslib/dsse.Verifier`
+func (a *VerifierAdapter) Verify(ctx context.Context, data, sig []byte) error {
+ return a.SignatureVerifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data), options.WithContext(ctx))
+}
+
+// Public implements `go-securesystemslib/dsse.Verifier`
+func (a *VerifierAdapter) Public() crypto.PublicKey {
+ return a.Pub
+}
+
+// KeyID implements `go-securesystemslib/dsse.Verifier`
+func (a *VerifierAdapter) KeyID() (string, error) {
+ return a.PubKeyID, nil
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/doc.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/doc.go
new file mode 100644
index 00000000000..f58f18068f0
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/doc.go
@@ -0,0 +1,17 @@
+//
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dsse contains handlers for Dead Simple Signing Envelopes
+package dsse
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go
new file mode 100644
index 00000000000..628e2413c0e
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go
@@ -0,0 +1,152 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dsse
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "encoding/base64"
+ "encoding/json"
+ "io"
+
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+ "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// WrapSigner returns a signature.Signer that uses the DSSE encoding format
+func WrapSigner(s signature.Signer, payloadType string) signature.Signer {
+ return &wrappedSigner{
+ s: s,
+ payloadType: payloadType,
+ }
+}
+
+type wrappedSigner struct {
+ s signature.Signer
+ payloadType string
+}
+
+// PublicKey returns the public key associated with the signer
+func (w *wrappedSigner) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+ return w.s.PublicKey(opts...)
+}
+
+// SignMessage signs the provided stream in the reader using the DSSE encoding format
+func (w *wrappedSigner) SignMessage(r io.Reader, opts ...signature.SignOption) ([]byte, error) {
+ p, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ pae := dsse.PAE(w.payloadType, p)
+ sig, err := w.s.SignMessage(bytes.NewReader(pae), opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ env := dsse.Envelope{
+ PayloadType: w.payloadType,
+ Payload: base64.StdEncoding.EncodeToString(p),
+ Signatures: []dsse.Signature{
+ {
+ Sig: base64.StdEncoding.EncodeToString(sig),
+ },
+ },
+ }
+ return json.Marshal(env)
+}
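+
+// The marshaled envelope has the standard DSSE JSON shape, for example
+// (values abbreviated and hypothetical; a "keyid" field may also appear):
+//
+//	{
+//	  "payloadType": "application/vnd.in-toto+json",
+//	  "payload": "eyJfdHlwZSI6...",
+//	  "signatures": [{"sig": "MEUCIQ..."}]
+//	}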
+
+// WrapVerifier returns a signature.Verifier that uses the DSSE encoding format
+func WrapVerifier(v signature.Verifier) signature.Verifier {
+ return &wrappedVerifier{
+ v: v,
+ }
+}
+
+type wrappedVerifier struct {
+ v signature.Verifier
+}
+
+// PublicKey returns the public key associated with the verifier
+func (w *wrappedVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+ return w.v.PublicKey(opts...)
+}
+
+// VerifySignature verifies the signature specified in a DSSE envelope
+func (w *wrappedVerifier) VerifySignature(s, _ io.Reader, _ ...signature.VerifyOption) error {
+ sig, err := io.ReadAll(s)
+ if err != nil {
+ return err
+ }
+
+ env := dsse.Envelope{}
+ if err := json.Unmarshal(sig, &env); err != nil {
+ return err
+ }
+
+ pub, err := w.PublicKey()
+ if err != nil {
+ return err
+ }
+ verifier, err := dsse.NewEnvelopeVerifier(&VerifierAdapter{
+ SignatureVerifier: w.v,
+
+ Pub: pub,
+ PubKeyID: "", // We do not want to limit verification to a specific key.
+ })
+ if err != nil {
+ return err
+ }
+
+ _, err = verifier.Verify(context.Background(), &env)
+ return err
+}
+
+// WrapSignerVerifier returns a signature.SignerVerifier that uses the DSSE encoding format
+func WrapSignerVerifier(sv signature.SignerVerifier, payloadType string) signature.SignerVerifier {
+ signer := &wrappedSigner{
+ payloadType: payloadType,
+ s: sv,
+ }
+ verifier := &wrappedVerifier{
+ v: sv,
+ }
+
+ return &wrappedSignerVerifier{
+ signer: signer,
+ verifier: verifier,
+ }
+}
+
+type wrappedSignerVerifier struct {
+ signer *wrappedSigner
+ verifier *wrappedVerifier
+}
+
+// PublicKey returns the public key associated with the verifier
+func (w *wrappedSignerVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+ return w.signer.PublicKey(opts...)
+}
+
+// VerifySignature verifies the signature specified in a DSSE envelope
+func (w *wrappedSignerVerifier) VerifySignature(s, r io.Reader, opts ...signature.VerifyOption) error {
+ return w.verifier.VerifySignature(s, r, opts...)
+}
+
+// SignMessage signs the provided stream in the reader using the DSSE encoding format
+func (w *wrappedSignerVerifier) SignMessage(r io.Reader, opts ...signature.SignOption) ([]byte, error) {
+ return w.signer.SignMessage(r, opts...)
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go
new file mode 100644
index 00000000000..34ebd5a41e8
--- /dev/null
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go
@@ -0,0 +1,186 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dsse
+
+import (
+ "context"
+ "crypto"
+ "encoding/json"
+ "errors"
+ "io"
+
+ "github.com/secure-systems-lab/go-securesystemslib/dsse"
+ "github.com/sigstore/sigstore/pkg/signature"
+)
+
+type wrappedMultiSigner struct {
+ sLAdapters []dsse.Signer
+ payloadType string
+}
+
+// WrapMultiSigner returns a signature.Signer that uses the DSSE encoding format
+func WrapMultiSigner(payloadType string, sL ...signature.Signer) signature.Signer {
+ signerAdapterL := make([]dsse.Signer, 0, len(sL))
+ for _, s := range sL {
+ pub, err := s.PublicKey()
+ if err != nil {
+ return nil
+ }
+
+ keyID, err := dsse.SHA256KeyID(pub)
+ if err != nil {
+ keyID = ""
+ }
+
+		signerAdapter := &SignerAdapter{
+			SignatureSigner: s,
+			Pub:             pub,
+			PubKeyID:        keyID,
+		}
+
+ signerAdapterL = append(signerAdapterL, signerAdapter)
+ }
+
+ return &wrappedMultiSigner{
+ sLAdapters: signerAdapterL,
+ payloadType: payloadType,
+ }
+}
+
+// PublicKey returns the public key associated with the signer
+func (wL *wrappedMultiSigner) PublicKey(_ ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+ return nil, errors.New("not supported for multi signatures")
+}
+
+// SignMessage signs the provided stream in the reader using the DSSE encoding format
+func (wL *wrappedMultiSigner) SignMessage(r io.Reader, _ ...signature.SignOption) ([]byte, error) {
+ p, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ envSigner, err := dsse.NewEnvelopeSigner(wL.sLAdapters...)
+ if err != nil {
+ return nil, err
+ }
+
+ env, err := envSigner.SignPayload(context.Background(), wL.payloadType, p)
+ if err != nil {
+ return nil, err
+ }
+
+ return json.Marshal(env)
+}
+
+type wrappedMultiVerifier struct {
+ vLAdapters []dsse.Verifier
+ threshold int
+ payloadType string
+}
+
+// WrapMultiVerifier returns a signature.Verifier that uses the DSSE encoding format
+func WrapMultiVerifier(payloadType string, threshold int, vL ...signature.Verifier) signature.Verifier {
+ verifierAdapterL := make([]dsse.Verifier, 0, len(vL))
+ for _, v := range vL {
+ pub, err := v.PublicKey()
+ if err != nil {
+ return nil
+ }
+
+ keyID, err := dsse.SHA256KeyID(pub)
+ if err != nil {
+ keyID = ""
+ }
+
+		verifierAdapter := &VerifierAdapter{
+			SignatureVerifier: v,
+			Pub:               pub,
+			PubKeyID:          keyID,
+		}
+
+ verifierAdapterL = append(verifierAdapterL, verifierAdapter)
+ }
+
+ return &wrappedMultiVerifier{
+ vLAdapters: verifierAdapterL,
+ payloadType: payloadType,
+ threshold: threshold,
+ }
+}
+
+// PublicKey is not supported for multi-signature verifiers and always returns an error
+func (wL *wrappedMultiVerifier) PublicKey(_ ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+ return nil, errors.New("not supported for multi signatures")
+}
+
+// VerifySignature verifies the signature specified in a DSSE envelope
+func (wL *wrappedMultiVerifier) VerifySignature(s, _ io.Reader, _ ...signature.VerifyOption) error {
+ sig, err := io.ReadAll(s)
+ if err != nil {
+ return err
+ }
+
+ env := dsse.Envelope{}
+ if err := json.Unmarshal(sig, &env); err != nil {
+ return err
+ }
+
+ envVerifier, err := dsse.NewMultiEnvelopeVerifier(wL.threshold, wL.vLAdapters...)
+ if err != nil {
+ return err
+ }
+
+ _, err = envVerifier.Verify(context.Background(), &env)
+ return err
+}
+
+// WrapMultiSignerVerifier returns a signature.SignerVerifier that uses the DSSE encoding format
+func WrapMultiSignerVerifier(payloadType string, threshold int, svL ...signature.SignerVerifier) signature.SignerVerifier {
+ signerL := make([]signature.Signer, 0, len(svL))
+ verifierL := make([]signature.Verifier, 0, len(svL))
+ for _, sv := range svL {
+ signerL = append(signerL, sv)
+ verifierL = append(verifierL, sv)
+ }
+
+ sL := WrapMultiSigner(payloadType, signerL...)
+ vL := WrapMultiVerifier(payloadType, threshold, verifierL...)
+
+ return &wrappedMultiSignerVerifier{
+ signer: sL,
+ verifier: vL,
+ }
+}
+
+type wrappedMultiSignerVerifier struct {
+ signer signature.Signer
+ verifier signature.Verifier
+}
+
+// PublicKey is not supported for multi-signature signer-verifiers; it delegates to the multi signer, which always returns an error
+func (w *wrappedMultiSignerVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) {
+ return w.signer.PublicKey(opts...)
+}
+
+// VerifySignature verifies the signature specified in a DSSE envelope
+func (w *wrappedMultiSignerVerifier) VerifySignature(s, r io.Reader, opts ...signature.VerifyOption) error {
+ return w.verifier.VerifySignature(s, r, opts...)
+}
+
+// SignMessage signs the provided stream in the reader using the DSSE encoding format
+func (w *wrappedMultiSignerVerifier) SignMessage(r io.Reader, opts ...signature.SignOption) ([]byte, error) {
+ return w.signer.SignMessage(r, opts...)
+}
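+
+// exampleMultiSign is an illustrative sketch (added for documentation; it is
+// not called anywhere): it wraps two SignerVerifiers so the payload is signed
+// by both, while verification of the resulting envelope requires only one
+// valid signature. The payload type is hypothetical.
+func exampleMultiSign(a, b signature.SignerVerifier, payload io.Reader) ([]byte, error) {
+	sv := WrapMultiSignerVerifier("application/vnd.example+json", 1, a, b)
+	return sv.SignMessage(payload)
+}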
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
index 1122989ff65..50f432798d3 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
@@ -31,9 +31,6 @@ import (
"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature/options"
-
- // these ensure we have the implementations loaded
- _ "golang.org/x/crypto/sha3"
)
// Signer creates digital signatures over a message using a specified key pair
diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/CONTRIBUTORS.md b/vendor/github.com/sigstore/timestamp-authority/v2/CONTRIBUTORS.md
new file mode 100644
index 00000000000..016e119f445
--- /dev/null
+++ b/vendor/github.com/sigstore/timestamp-authority/v2/CONTRIBUTORS.md
@@ -0,0 +1,132 @@
+# Contributing
+
+When contributing to this repository, please first discuss the change you wish
+to make via an [issue](https://github.com/sigstore/timestamp-authority/issues).
+
+## Pull Request Process
+
+1. Create an [issue](https://github.com/sigstore/timestamp-authority/issues)
+ outlining the fix or feature.
+2. Fork the timestamp-authority repository to your own GitHub account and clone it locally.
+3. Hack on your changes.
+4. Update the README.md with details of changes to any interface; this includes new environment
+   variables, exposed ports, useful file locations, CLI parameters, and
+   new or changed configuration values.
+5. Correctly format your commit message; see [Commit Message Guidelines](#commit-message-guidelines)
+   below.
+6. Ensure that CI passes; if it fails, fix the failures.
+7. Every pull request requires a review from the [core timestamp-authority team](https://github.com/orgs/sigstore/teams/tsa-codeowners)
+   before merging.
+8. If your pull request consists of more than one commit, please squash your
+   commits as described in [Squash Commits](#squash-commits).
+
+## Commit Message Guidelines
+
+We follow the commit formatting recommendations found on [Chris Beams' How to Write a Git Commit Message article](https://chris.beams.io/posts/git-commit/).
+
+Well-formed commit messages not only help reviewers understand the nature of
+the Pull Request, but also assist the release process, where commit messages
+are used to generate release notes.
+
+A good example of a commit message would be as follows:
+
+```text
+Summarize changes in around 50 characters or less
+
+More detailed explanatory text, if necessary. Wrap it to about 72
+characters or so. In some contexts, the first line is treated as the
+subject of the commit and the rest of the text as the body. The
+blank line separating the summary from the body is critical (unless
+you omit the body entirely); various tools like `log`, `shortlog`
+and `rebase` can get confused if you run the two together.
+
+Explain the problem that this commit is solving. Focus on why you
+are making this change as opposed to how (the code explains that).
+Are there side effects or other unintuitive consequences of this
+change? Here's the place to explain them.
+
+Further paragraphs come after blank lines.
+
+ - Bullet points are okay, too
+
+ - Typically a hyphen or asterisk is used for the bullet, preceded
+ by a single space, with blank lines in between, but conventions
+ vary here
+
+If you use an issue tracker, put references to them at the bottom,
+like this:
+
+Resolves: #123
+See also: #456, #789
+```
+
+Note the `Resolves: #123` tag; this references the issue raised and allows us to
+ensure issues are associated and closed when a pull request is merged.
+
+Please refer to [the GitHub help page on message types](https://help.github.com/articles/closing-issues-using-keywords/) for a complete list of issue references.
+
+## Squash Commits
+
+Should your pull request consist of more than one commit (perhaps due to
+a change being requested during the review cycle), please perform a git squash
+once a reviewer has approved your pull request.
+
+A squash can be performed as follows. Let's say you have the following commits:
+
+```text
+initial commit
+second commit
+final commit
+```
+
+Run the command below with the number set to the total number of commits you wish to
+squash (in our case, 3 commits):
+
+```shell
+git rebase -i HEAD~3
+```
+
+Your default text editor will then open and you will see the following:
+
+```shell
+pick eb36612 initial commit
+pick 9ac8968 second commit
+pick a760569 final commit
+
+# Rebase eb1429f..a760569 onto eb1429f (3 commands)
+```
+
+We want to rebase on top of our first commit, so we change the other two commits
+to `squash`:
+
+```shell
+pick eb36612 initial commit
+squash 9ac8968 second commit
+squash a760569 final commit
+```
+
+After this, should you wish to update your commit message to better summarise
+all of your pull request, run:
+
+```shell
+git commit --amend
+```
+
+You will then need to force push (assuming your initial commit(s) were posted
+to GitHub):
+
+```shell
+git push origin your-branch --force
+```
+
+Alternatively, a core member can squash your commits within GitHub.
+
+## DCO Signoff
+
+Make sure to sign the [Developer Certificate of
+Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff).
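+
+The signoff can be added automatically when committing, for example:
+
+```shell
+git commit --signoff -m "Summarize changes in around 50 characters or less"
+```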
+
+## Code of Conduct
+
+Sigstore Timestamp-Authority adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct.
+Please take a moment to read the [CODE_OF_CONDUCT.md](/CODE_OF_CONDUCT.md) document.
diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/COPYRIGHT.txt b/vendor/github.com/sigstore/timestamp-authority/v2/COPYRIGHT.txt
new file mode 100644
index 00000000000..5f2c003d6d1
--- /dev/null
+++ b/vendor/github.com/sigstore/timestamp-authority/v2/COPYRIGHT.txt
@@ -0,0 +1,13 @@
+Copyright 2022 The Sigstore Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/LICENSE b/vendor/github.com/sigstore/timestamp-authority/v2/LICENSE
new file mode 100644
index 00000000000..f49a4e16e68
--- /dev/null
+++ b/vendor/github.com/sigstore/timestamp-authority/v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go
new file mode 100644
index 00000000000..4f6c77c7925
--- /dev/null
+++ b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify.go
@@ -0,0 +1,340 @@
+//
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verification
+
+import (
+ "bytes"
+ "crypto/x509"
+ "encoding/asn1"
+ "fmt"
+ "hash"
+ "io"
+ "math/big"
+
+ "github.com/digitorus/pkcs7"
+ "github.com/digitorus/timestamp"
+ "github.com/pkg/errors"
+)
+
+var (
+ // EKUOID is the Extended Key Usage OID, per RFC 5280
+ EKUOID = asn1.ObjectIdentifier{2, 5, 29, 37}
+)
+
+// VerifyOpts contains verification options for an RFC 3161 timestamp
+type VerifyOpts struct {
+ // OID verifies that the TSR's OID has an expected value. Optional, used when
+ // an alternative OID was passed with a request to the TSA
+ OID asn1.ObjectIdentifier
+ // TSACertificate verifies that the TSR uses the TSACertificate as expected. Optional if the TSR contains the TSA certificate
+ TSACertificate *x509.Certificate
+ // Intermediates verifies the TSR's certificate. Optional, used for chain building
+ Intermediates []*x509.Certificate
+ // Roots is the set of trusted root certificates that verifies the TSR's certificate
+ Roots []*x509.Certificate
+ // Nonce verifies that the TSR contains the expected nonce. Optional, used when
+ // an optional nonce was passed with a request to the TSA
+ Nonce *big.Int
+ // CommonName verifies that the TSR certificate subject's Common Name matches the expected value. Optional
+ CommonName string
+}
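+
+// exampleVerifyOpts is an illustrative sketch (added for documentation; it is
+// not called anywhere): a caller holding the TSA's root and leaf certificates
+// might pin both plus the expected subject Common Name. The Common Name value
+// is hypothetical.
+func exampleVerifyOpts(tsaRoot, tsaLeaf *x509.Certificate) VerifyOpts {
+	return VerifyOpts{
+		Roots:          []*x509.Certificate{tsaRoot},
+		TSACertificate: tsaLeaf,
+		CommonName:     "Example TSA",
+	}
+}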
+
+// Verify the TSR's certificate identifier matches a provided TSA certificate
+func verifyESSCertID(tsaCert *x509.Certificate, opts VerifyOpts) error {
+ if opts.TSACertificate == nil {
+ return nil
+ }
+
+ if !bytes.Equal(opts.TSACertificate.RawIssuer, tsaCert.RawIssuer) {
+ return fmt.Errorf("TSR cert issuer does not match provided TSA cert issuer")
+ }
+
+ if opts.TSACertificate.SerialNumber.Cmp(tsaCert.SerialNumber) != 0 {
+ return fmt.Errorf("TSR cert serial number does not match provided TSA cert serial number")
+ }
+
+ return nil
+}
+
+// Verify the leaf certificate's subject Common Name matches a provided Common Name
+func verifySubjectCommonName(cert *x509.Certificate, opts VerifyOpts) error {
+ if opts.CommonName == "" {
+ return nil
+ }
+
+ if cert.Subject.CommonName != opts.CommonName {
+ return fmt.Errorf("the certificate's subject Common Name %s does not match the provided Common Name %s", cert.Subject.CommonName, opts.CommonName)
+ }
+ return nil
+}
+
+// If embedded in the TSR, verify the TSR's leaf certificate matches a provided TSA certificate
+func verifyEmbeddedLeafCert(tsaCert *x509.Certificate, opts VerifyOpts) error {
+ if opts.TSACertificate != nil && !opts.TSACertificate.Equal(tsaCert) {
+ return fmt.Errorf("certificate embedded in the TSR does not match the provided TSA certificate")
+ }
+ return nil
+}
+
+// Verify the leaf's EKU is set to critical, per RFC 3161 2.3
+func verifyLeafCertCriticalEKU(cert *x509.Certificate) error {
+ var criticalEKU bool
+ for _, ext := range cert.Extensions {
+ if ext.Id.Equal(EKUOID) {
+ criticalEKU = ext.Critical
+ break
+ }
+ }
+ if !criticalEKU {
+ return errors.New("certificate must set EKU to critical")
+ }
+ return nil
+}
+
+func verifyLeafCert(ts timestamp.Timestamp, opts VerifyOpts) error {
+ if len(ts.Certificates) == 0 && opts.TSACertificate == nil {
+ return fmt.Errorf("leaf certificate must be present the in TSR or as a verify option")
+ }
+
+ errMsg := "failed to verify TSA certificate"
+
+ var leafCert *x509.Certificate
+ if len(ts.Certificates) != 0 {
+ for _, c := range ts.Certificates {
+ if !c.IsCA {
+ leafCert = c
+ break
+ }
+ }
+ if leafCert == nil {
+ return fmt.Errorf("no leaf certificate found in chain")
+ }
+
+ err := verifyEmbeddedLeafCert(leafCert, opts)
+ if err != nil {
+ return fmt.Errorf("%s: %w", errMsg, err)
+ }
+ } else {
+ leafCert = opts.TSACertificate
+ }
+
+ err := verifyLeafCertCriticalEKU(leafCert)
+ if err != nil {
+ return fmt.Errorf("%s: %w", errMsg, err)
+ }
+
+ err = verifyESSCertID(leafCert, opts)
+ if err != nil {
+ return fmt.Errorf("%s: %w", errMsg, err)
+ }
+
+ err = verifySubjectCommonName(leafCert, opts)
+ if err != nil {
+ return fmt.Errorf("%s: %w", errMsg, err)
+ }
+
+ // verifies that the leaf certificate and any intermediate certificates
+ // have EKU set to only time stamping usage
+ err = verifyLeafAndIntermediatesTimestampingEKU(leafCert, opts)
+ if err != nil {
+ return fmt.Errorf("failed to verify EKU on leaf certificate: %w", err)
+ }
+
+ return nil
+}
+
+func verifyLeafExtendedKeyUsage(cert *x509.Certificate) error {
+ certEKULen := len(cert.ExtKeyUsage)
+ if certEKULen != 1 {
+ return fmt.Errorf("certificate has %d extended key usages, expected only one", certEKULen)
+ }
+
+ if cert.ExtKeyUsage[0] != x509.ExtKeyUsageTimeStamping {
+ return fmt.Errorf("leaf certificate EKU is not set to TimeStamping as required")
+ }
+ return nil
+}
+
+func verifyIntermediateExtendedKeyUsage(cert *x509.Certificate) error {
+ // If no EKU specified it means unrestricted usage
+ if len(cert.ExtKeyUsage) == 0 {
+ return nil
+ }
+
+ allowsTimestampingUse := false
+ for _, eku := range cert.ExtKeyUsage {
+ if eku == x509.ExtKeyUsageTimeStamping || eku == x509.ExtKeyUsageAny {
+ allowsTimestampingUse = true
+ break
+ }
+ }
+
+ if !allowsTimestampingUse {
+ return errors.New("intermediate certificate does not allow Timestamping usage")
+ }
+
+ return nil
+}
+
+// Verify the leaf and intermediate certificates (called "EKU chaining") all
+// have the appropriate extended key usage set.
+// Leaf certificates must have exactly one EKU set to Timestamping.
+// Intermediates can have no EKU (unrestricted) or multiple EKUs,
+// which need to include Timestamping or UsageAny.
+func verifyLeafAndIntermediatesTimestampingEKU(leafCert *x509.Certificate, opts VerifyOpts) error {
+ err := verifyLeafExtendedKeyUsage(leafCert)
+ if err != nil {
+ return fmt.Errorf("failed to verify EKU on leaf certificate: %w", err)
+ }
+
+ for _, cert := range opts.Intermediates {
+ err := verifyIntermediateExtendedKeyUsage(cert)
+ if err != nil {
+ return fmt.Errorf("failed to verify EKU on intermediate certificate: %w", err)
+ }
+ }
+ return nil
+}
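+
+// For illustration only (a hypothetical issuance template, not upstream code):
+// a leaf certificate satisfying the checks above would be issued with exactly
+//
+//	template := x509.Certificate{
+//		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageTimeStamping},
+//	}
+//
+// while an intermediate may omit ExtKeyUsage entirely (unrestricted) or
+// include x509.ExtKeyUsageTimeStamping or x509.ExtKeyUsageAny among its
+// usages.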
+
+// Verify the OID of the TSR matches an expected OID
+func verifyOID(oid []int, opts VerifyOpts) error {
+ if opts.OID == nil {
+ return nil
+ }
+ responseOID := opts.OID
+ if len(oid) != len(responseOID) {
+ return fmt.Errorf("OID lengths do not match")
+ }
+ for i, v := range oid {
+ if v != responseOID[i] {
+ return fmt.Errorf("OID content does not match")
+ }
+ }
+ return nil
+}
+
+// Verify the nonce; this is mostly important when the response is first returned
+func verifyNonce(requestNonce *big.Int, opts VerifyOpts) error {
+ if opts.Nonce == nil {
+ return nil
+ }
+ if opts.Nonce.Cmp(requestNonce) != 0 {
+ return fmt.Errorf("incoming nonce %d does not match TSR nonce %d", requestNonce, opts.Nonce)
+ }
+ return nil
+}
+
+// VerifyTimestampResponse verifies the timestamp response using a timestamp certificate chain.
+func VerifyTimestampResponse(tsrBytes []byte, artifact io.Reader, opts VerifyOpts) (*timestamp.Timestamp, error) {
+ // Verify the status of the TSR does not contain an error
+ // handled by the timestamp.ParseResponse function
+ ts, err := timestamp.ParseResponse(tsrBytes)
+ if err != nil {
+ pe := timestamp.ParseError("")
+ if errors.As(err, &pe) {
+ return nil, fmt.Errorf("timestamp response is not valid: %w", err)
+ }
+ return nil, fmt.Errorf("error parsing response into Timestamp: %w", err)
+ }
+
+ // verify the timestamp response signature using the provided certificate pool
+ if err = verifyTSRWithChain(ts, opts); err != nil {
+ return nil, err
+ }
+
+ if err = verifyNonce(ts.Nonce, opts); err != nil {
+ return nil, err
+ }
+
+ if err = verifyOID(ts.Policy, opts); err != nil {
+ return nil, err
+ }
+
+ if err = verifyLeafCert(*ts, opts); err != nil {
+ return nil, err
+ }
+
+ // verify the hash in the timestamp response matches the artifact hash
+ if err = verifyHashedMessages(ts.HashAlgorithm.New(), ts.HashedMessage, artifact); err != nil {
+ return nil, err
+ }
+
+ // if the parsed timestamp is verified, return the timestamp
+ return ts, nil
+}
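+
+// A minimal usage sketch (assuming tsrBytes and the artifact bytes were
+// loaded elsewhere, and roots holds the TSA's trusted root certificates):
+//
+//	ts, err := VerifyTimestampResponse(tsrBytes, bytes.NewReader(artifact), VerifyOpts{
+//		Roots: roots,
+//	})
+//	if err != nil {
+//		// the TSR failed one of the checks above
+//	}
+//	_ = ts.Time // the verified signing time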
+
+func verifyTSRWithChain(ts *timestamp.Timestamp, opts VerifyOpts) error {
+ p7Message, err := pkcs7.Parse(ts.RawToken)
+ if err != nil {
+ return fmt.Errorf("error parsing hashed message: %w", err)
+ }
+
+ if len(opts.Roots) == 0 {
+ return fmt.Errorf("no root certificates provided for verifying the certificate chain")
+ }
+ rootCertPool := x509.NewCertPool()
+ for _, cert := range opts.Roots {
+ if cert != nil {
+ rootCertPool.AddCert(cert)
+ }
+ }
+ if rootCertPool.Equal(x509.NewCertPool()) {
+ return fmt.Errorf("no valid root certificates provided for verifying the certificate chain")
+ }
+ intermediateCertPool := x509.NewCertPool()
+ for _, cert := range opts.Intermediates {
+ if cert != nil {
+ intermediateCertPool.AddCert(cert)
+ }
+ }
+
+ x509Opts := x509.VerifyOptions{
+ Roots: rootCertPool,
+ Intermediates: intermediateCertPool,
+ }
+
+ // if the PKCS7 object does not have any certificates set in the
+ // Certificates field, verification will fail because it will be unable
+ // to find a leaf certificate associated with a signer. Since the leaf
+ // certificate issuer and serial number information is already part of
+ // the PKCS7 object, adding the leaf certificate to the Certificates
+ // field allows verification to pass.
+ if p7Message.Certificates == nil && opts.TSACertificate != nil {
+ p7Message.Certificates = []*x509.Certificate{opts.TSACertificate}
+ }
+
+ err = p7Message.VerifyWithOpts(x509Opts)
+ if err != nil {
+ return fmt.Errorf("error while verifying with chain: %w", err)
+ }
+
+ return nil
+}
+
+// Verify that the TSR's hashed message matches the digest of the artifact to be timestamped
+func verifyHashedMessages(hashAlg hash.Hash, hashedMessage []byte, artifactReader io.Reader) error {
+ h := hashAlg
+ if _, err := io.Copy(h, artifactReader); err != nil {
+ return fmt.Errorf("failed to create hash %w", err)
+ }
+ localHashedMsg := h.Sum(nil)
+
+ if !bytes.Equal(localHashedMsg, hashedMessage) {
+ return fmt.Errorf("hashed messages don't match")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify_request.go b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify_request.go
new file mode 100644
index 00000000000..ba7511d7551
--- /dev/null
+++ b/vendor/github.com/sigstore/timestamp-authority/v2/pkg/verification/verify_request.go
@@ -0,0 +1,47 @@
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verification
+
+import (
+ "crypto"
+ "fmt"
+
+ "github.com/digitorus/timestamp"
+ "github.com/pkg/errors"
+)
+
+var ErrWeakHashAlg = errors.New("weak hash algorithm: must be SHA-256, SHA-384, or SHA-512")
+var ErrUnsupportedHashAlg = errors.New("unsupported hash algorithm")
+var ErrInconsistentDigestLength = errors.New("digest length inconsistent with specified hash algorithm")
+
+func VerifyRequest(ts *timestamp.Request) error {
+ // only SHA-1, SHA-256, SHA-384, and SHA-512 are supported by the underlying library
+ switch ts.HashAlgorithm {
+ case crypto.SHA1:
+ return ErrWeakHashAlg
+ case crypto.SHA256, crypto.SHA384, crypto.SHA512:
+ default:
+ return ErrUnsupportedHashAlg
+ }
+
+ expectedDigestLength := ts.HashAlgorithm.Size()
+ actualDigestLength := len(ts.HashedMessage)
+
+ if actualDigestLength != expectedDigestLength {
+ return fmt.Errorf("%w: expected %d bytes, got %d bytes", ErrInconsistentDigestLength, expectedDigestLength, actualDigestLength)
+ }
+
+ return nil
+}
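+
+// A minimal sketch of the request-side check (digest is assumed to be a
+// SHA-256 digest computed elsewhere, so it must be exactly 32 bytes):
+//
+//	req := &timestamp.Request{
+//		HashAlgorithm: crypto.SHA256,
+//		HashedMessage: digest,
+//	}
+//	if err := VerifyRequest(req); err != nil {
+//		// weak/unsupported algorithm or digest length mismatch
+//	}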
diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml
index 65dc2850377..792db361813 100644
--- a/vendor/github.com/sirupsen/logrus/.golangci.yml
+++ b/vendor/github.com/sirupsen/logrus/.golangci.yml
@@ -1,40 +1,67 @@
+version: "2"
run:
- # do not run on test files yet
tests: false
-
-# all available settings of specific linters
-linters-settings:
- errcheck:
- # report about not checking of errors in type assetions: `a := b.(MyStruct)`;
- # default is false: such cases aren't reported by default.
- check-type-assertions: false
-
- # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
- # default is false: such cases aren't reported by default.
- check-blank: false
-
- lll:
- line-length: 100
- tab-width: 4
-
- prealloc:
- simple: false
- range-loops: false
- for-loops: false
-
- whitespace:
- multi-if: false # Enforces newlines (or comments) after every multi-line if statement
- multi-func: false # Enforces newlines (or comments) after every multi-line function signature
-
linters:
enable:
- - megacheck
- - govet
+ - asasalint
+ - asciicheck
+ - bidichk
+ - bodyclose
+ - contextcheck
+ - durationcheck
+ - errchkjson
+ - errorlint
+ - exhaustive
+ - gocheckcompilerdirectives
+ - gochecksumtype
+ - gosec
+ - gosmopolitan
+ - loggercheck
+ - makezero
+ - musttag
+ - nilerr
+ - nilnesserr
+ - noctx
+ - protogetter
+ - reassign
+ - recvcheck
+ - rowserrcheck
+ - spancheck
+ - sqlclosecheck
+ - testifylint
+ - unparam
+ - zerologlint
disable:
- - maligned
- prealloc
- disable-all: false
- presets:
- - bugs
- - unused
- fast: false
+ settings:
+ errcheck:
+ check-type-assertions: false
+ check-blank: false
+ lll:
+ line-length: 100
+ tab-width: 4
+ prealloc:
+ simple: false
+ range-loops: false
+ for-loops: false
+ whitespace:
+ multi-if: false
+ multi-func: false
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
index 7567f612898..098608ff4b4 100644
--- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -37,7 +37,7 @@ Features:
# 1.6.0
Fixes:
* end of line cleanup
- * revert the entry concurrency bug fix whic leads to deadlock under some circumstances
+ * revert the entry concurrency bug fix which leads to deadlock under some circumstances
* update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
Features:
@@ -129,7 +129,7 @@ This new release introduces:
which is mostly useful for logger wrapper
* a fix reverting the immutability of the entry given as parameter to the hooks
a new configuration field of the json formatter in order to put all the fields
- in a nested dictionnary
+ in a nested dictionary
* a new SetOutput method in the Logger
* a new configuration of the textformatter to configure the name of the default keys
* a new configuration of the text formatter to disable the level truncation
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
index d1d4a85fd75..cc5dab7eb78 100644
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -1,4 +1,4 @@
-# Logrus
[](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [](https://travis-ci.org/sirupsen/logrus) [](https://pkg.go.dev/github.com/sirupsen/logrus)
+# Logrus
[](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [](https://pkg.go.dev/github.com/sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger.
@@ -40,7 +40,7 @@ plain text):

-With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+With `logrus.SetFormatter(&logrus.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```text
@@ -60,9 +60,9 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
-With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+With the default `logrus.SetFormatter(&logrus.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
-[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+[logfmt](https://pkg.go.dev/github.com/kr/logfmt) format:
```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
@@ -75,17 +75,18 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822
To ensure this behaviour even if a TTY is attached, set your formatter as follows:
```go
- log.SetFormatter(&log.TextFormatter{
- DisableColors: true,
- FullTimestamp: true,
- })
+logrus.SetFormatter(&logrus.TextFormatter{
+ DisableColors: true,
+ FullTimestamp: true,
+})
```
#### Logging Method Name
If you wish to add the calling method as a field, instruct the logger via:
+
```go
-log.SetReportCaller(true)
+logrus.SetReportCaller(true)
```
This adds the caller as 'method' like so:
@@ -100,11 +101,11 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr
Note that this does add measurable overhead - the cost will depend on the version of Go, but is
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
environment via benchmarks:
-```
+
+```bash
go test -bench=.*CallerTracing
```
-
#### Case-sensitivity
The organization's name was changed to lower-case--and this will not be changed
@@ -118,12 +119,10 @@ The simplest way to use Logrus is simply the package-level exported logger:
```go
package main
-import (
- log "github.com/sirupsen/logrus"
-)
+import "github.com/sirupsen/logrus"
func main() {
- log.WithFields(log.Fields{
+ logrus.WithFields(logrus.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
@@ -139,6 +138,7 @@ package main
import (
"os"
+
log "github.com/sirupsen/logrus"
)
@@ -190,26 +190,27 @@ package main
import (
"os"
+
"github.com/sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
-var log = logrus.New()
+var logger = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
- // exported logger. See Godoc.
- log.Out = os.Stdout
+ // exported logger. See Godoc.
+ logger.Out = os.Stdout
// You could set this to any `io.Writer` such as a file
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
// if err == nil {
- // log.Out = file
+ // logger.Out = file
// } else {
- // log.Info("Failed to log to file, using default stderr")
+ // logger.Info("Failed to log to file, using default stderr")
// }
- log.WithFields(logrus.Fields{
+ logger.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
@@ -219,12 +220,12 @@ func main() {
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
-long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+long, unparseable error messages. For example, instead of: `logrus.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
-log.WithFields(log.Fields{
+logrus.WithFields(logrus.Fields{
"event": event,
"topic": topic,
"key": key,
@@ -245,12 +246,12 @@ seen as a hint you should add a field, however, you can still use the
Often it's helpful to have fields _always_ attached to log statements in an
application or parts of one. For example, you may want to always log the
`request_id` and `user_ip` in the context of a request. Instead of writing
-`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+`logger.WithFields(logrus.Fields{"request_id": request_id, "user_ip": user_ip})` on
every line, you can create a `logrus.Entry` to pass around instead:
```go
-requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
-requestLogger.Info("something happened on that request") # will log request_id and user_ip
+requestLogger := logger.WithFields(logrus.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
requestLogger.Warn("something not great happened")
```
@@ -264,28 +265,31 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:
```go
+package main
+
import (
- log "github.com/sirupsen/logrus"
- "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
- logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
"log/syslog"
+
+ "github.com/sirupsen/logrus"
+ airbrake "gopkg.in/gemnasium/logrus-airbrake-hook.v2"
+ logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
)
func init() {
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
- log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+ logrus.AddHook(airbrake.NewHook(123, "xyz", "production"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
- log.Error("Unable to connect to local syslog daemon")
+ logrus.Error("Unable to connect to local syslog daemon")
} else {
- log.AddHook(hook)
+ logrus.AddHook(hook)
}
}
```
-Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
+Note: Syslog hooks also support connecting to local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
@@ -295,15 +299,15 @@ A list of currently known service hooks can be found in this wiki [page](https:/
Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
```go
-log.Trace("Something very low level.")
-log.Debug("Useful debugging information.")
-log.Info("Something noteworthy happened!")
-log.Warn("You should probably take a look at this.")
-log.Error("Something failed but I'm not quitting.")
+logrus.Trace("Something very low level.")
+logrus.Debug("Useful debugging information.")
+logrus.Info("Something noteworthy happened!")
+logrus.Warn("You should probably take a look at this.")
+logrus.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
-log.Fatal("Bye.")
+logrus.Fatal("Bye.")
// Calls panic() after logging
-log.Panic("I'm bailing.")
+logrus.Panic("I'm bailing.")
```
You can set the logging level on a `Logger`, then it will only log entries with
@@ -311,13 +315,13 @@ that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
-log.SetLevel(log.InfoLevel)
+logrus.SetLevel(logrus.InfoLevel)
```
-It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+It may be useful to set `logrus.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
-Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
+Note: If you want different log levels for global (`logrus.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
#### Entries
@@ -340,17 +344,17 @@ could do:
```go
import (
- log "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
func init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
- log.SetFormatter(&log.JSONFormatter{})
+ logrus.SetFormatter(&logrus.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
- log.SetFormatter(&log.TextFormatter{})
+ logrus.SetFormatter(&logrus.TextFormatter{})
}
}
```
@@ -372,11 +376,11 @@ The built-in logging formatters are:
* When colors are enabled, levels are truncated to 4 characters by default. To disable
truncation set the `DisableLevelTruncation` field to `true`.
* When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
- * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+ * All options are listed in the [generated docs](https://pkg.go.dev/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
- * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+ * All options are listed in the [generated docs](https://pkg.go.dev/github.com/sirupsen/logrus#JSONFormatter).
-Third party logging formatters:
+Third-party logging formatters:
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
@@ -384,7 +388,7 @@ Third party logging formatters:
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
-* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files.
+* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Save log to files.
* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added.
You can define your formatter by implementing the `Formatter` interface,
@@ -393,10 +397,9 @@ requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
default ones (see Entries section above):
```go
-type MyJSONFormatter struct {
-}
+type MyJSONFormatter struct{}
-log.SetFormatter(new(MyJSONFormatter))
+logrus.SetFormatter(new(MyJSONFormatter))
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
@@ -455,17 +458,18 @@ entries. It should not be a feature of the application-level logger.
#### Testing
-Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
import(
+ "testing"
+
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
- "testing"
)
func TestSomething(t*testing.T){
@@ -486,15 +490,15 @@ func TestSomething(t*testing.T){
Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
-to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
+to gracefully shut down. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
-```
-...
+```go
+// ...
handler := func() {
- // gracefully shutdown something...
+ // gracefully shut down something...
}
logrus.RegisterExitHandler(handler)
-...
+// ...
```
#### Thread safety
@@ -502,7 +506,7 @@ logrus.RegisterExitHandler(handler)
By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
-Situation when locking is not needed includes:
+Situations when locking is not needed include:
* You have no hooks registered, or hooks calling is already thread-safe.
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
index df9d65c3a5b..e90f09ea68c 100644
--- a/vendor/github.com/sirupsen/logrus/appveyor.yml
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -1,14 +1,12 @@
-version: "{build}"
+# Minimal stub to satisfy AppVeyor CI
+version: 1.0.{build}
platform: x64
-clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
- GOPATH: c:\gopath
+shallow_clone: true
+
branches:
only:
- master
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
+ - main
+
build_script:
- - go get -t
- - go test
+ - echo "No-op build to satisfy AppVeyor CI"
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index 71cdbbc35d2..71d796d0b13 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -34,13 +34,15 @@ func init() {
minimumCallerDepth = 1
}
-// Defines the key when adding errors using WithError.
+// ErrorKey defines the key when adding errors using [WithError], [Logger.WithError].
var ErrorKey = "error"
-// An entry is the final or intermediate Logrus logging entry. It contains all
+// Entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
// reused and passed around as much as you wish to avoid field duplication.
+//
+//nolint:recvcheck // the methods of "Entry" use pointer and non-pointer receivers.
type Entry struct {
Logger *Logger
@@ -86,12 +88,12 @@ func (entry *Entry) Dup() *Entry {
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
}
-// Returns the bytes representation of this entry from the formatter.
+// Bytes returns the bytes representation of this entry from the formatter.
func (entry *Entry) Bytes() ([]byte, error) {
return entry.Logger.Formatter.Format(entry)
}
-// Returns the string representation from the reader and ultimately the
+// String returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
serialized, err := entry.Bytes()
@@ -102,12 +104,13 @@ func (entry *Entry) String() (string, error) {
return str, nil
}
-// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+// WithError adds an error as single field (using the key defined in [ErrorKey])
+// to the Entry.
func (entry *Entry) WithError(err error) *Entry {
return entry.WithField(ErrorKey, err)
}
-// Add a context to the Entry.
+// WithContext adds a context to the Entry.
func (entry *Entry) WithContext(ctx context.Context) *Entry {
dataCopy := make(Fields, len(entry.Data))
for k, v := range entry.Data {
@@ -116,12 +119,12 @@ func (entry *Entry) WithContext(ctx context.Context) *Entry {
return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
}
-// Add a single field to the Entry.
+// WithField adds a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
-// Add a map of fields to the Entry.
+// WithFields adds a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
@@ -150,7 +153,7 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
}
-// Overrides the time of the Entry.
+// WithTime overrides the time of the Entry.
func (entry *Entry) WithTime(t time.Time) *Entry {
dataCopy := make(Fields, len(entry.Data))
for k, v := range entry.Data {
@@ -204,7 +207,7 @@ func getCaller() *runtime.Frame {
// If the caller isn't part of this package, we're done
if pkg != logrusPackage {
- return &f //nolint:scopelint
+ return &f
}
}
@@ -432,7 +435,7 @@ func (entry *Entry) Panicln(args ...interface{}) {
entry.Logln(PanicLevel, args...)
}
-// Sprintlnn => Sprint no newline. This is to get the behavior of how
+// sprintlnn => Sprint no newline. This is to get the behavior of how
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
index 3f151cdc392..9ab978a4578 100644
--- a/vendor/github.com/sirupsen/logrus/hooks.go
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -1,16 +1,16 @@
package logrus
-// A hook to be fired when logging on the logging levels returned from
-// `Levels()` on your implementation of the interface. Note that this is not
+// Hook describes hooks to be fired when logging on the logging levels returned from
+// [Hook.Levels] on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
-// functionality yourself if your call is non-blocking and you don't wish for
+// functionality yourself if your call is non-blocking, and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
-// Internal type for storing the hooks on a logger instance.
+// LevelHooks is an internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index 5ff0aef6d3f..f5b8c439ee8 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -72,16 +72,16 @@ func (mw *MutexWrap) Disable() {
mw.disabled = true
}
-// Creates a new logger. Configuration should be set by changing `Formatter`,
-// `Out` and `Hooks` directly on the default logger instance. You can also just
+// New creates a new logger. Configuration should be set by changing [Formatter],
+// Out and Hooks directly on the default Logger instance. You can also just
// instantiate your own:
//
-// var log = &logrus.Logger{
-// Out: os.Stderr,
-// Formatter: new(logrus.TextFormatter),
-// Hooks: make(logrus.LevelHooks),
-// Level: logrus.DebugLevel,
-// }
+// var log = &logrus.Logger{
+// Out: os.Stderr,
+// Formatter: new(logrus.TextFormatter),
+// Hooks: make(logrus.LevelHooks),
+// Level: logrus.DebugLevel,
+// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
@@ -118,30 +118,30 @@ func (logger *Logger) WithField(key string, value interface{}) *Entry {
return entry.WithField(key, value)
}
-// Adds a struct of fields to the log entry. All it does is call `WithField` for
-// each `Field`.
+// WithFields adds a struct of fields to the log entry. It calls [Entry.WithField]
+// for each Field.
func (logger *Logger) WithFields(fields Fields) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithFields(fields)
}
-// Add an error as single field to the log entry. All it does is call
-// `WithError` for the given `error`.
+// WithError adds an error as single field to the log entry. It calls
+// [Entry.WithError] for the given error.
func (logger *Logger) WithError(err error) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithError(err)
}
-// Add a context to the log entry.
+// WithContext adds a context to the log entry.
func (logger *Logger) WithContext(ctx context.Context) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithContext(ctx)
}
-// Overrides the time of the log entry.
+// WithTime overrides the time of the log entry.
func (logger *Logger) WithTime(t time.Time) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
@@ -347,9 +347,9 @@ func (logger *Logger) Exit(code int) {
logger.ExitFunc(code)
}
-//When file is opened with appending mode, it's safe to
-//write concurrently to a file (within 4k message on Linux).
-//In these cases user can choose to disable the lock.
+// SetNoLock disables the lock for situations where a file is opened in
+// append mode and concurrent writes to the file are safe (for messages
+// within 4k on Linux). In these cases the user can choose to disable the lock.
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
index 2f16224cb9f..37fc4fef85a 100644
--- a/vendor/github.com/sirupsen/logrus/logrus.go
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -6,13 +6,15 @@ import (
"strings"
)
-// Fields type, used to pass to `WithFields`.
+// Fields type, used to pass to [WithFields].
type Fields map[string]interface{}
// Level type
+//
+//nolint:recvcheck // the methods of "Level" use pointer and non-pointer receivers.
type Level uint32
-// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+// Convert the Level to a string. E.g. [PanicLevel] becomes "panic".
func (level Level) String() string {
if b, err := level.MarshalText(); err == nil {
return string(b)
@@ -77,7 +79,7 @@ func (level Level) MarshalText() ([]byte, error) {
return nil, fmt.Errorf("not a valid logrus level %d", level)
}
-// A constant exposing all logging levels
+// AllLevels exposes all logging levels.
var AllLevels = []Level{
PanicLevel,
FatalLevel,
@@ -119,8 +121,8 @@ var (
)
// StdLogger is what your logrus-enabled library should take, that way
-// it'll accept a stdlib logger and a logrus logger. There's no standard
-// interface, this is the closest we get, unfortunately.
+// it'll accept a stdlib logger ([log.Logger]) and a logrus logger.
+// There's no standard interface, so this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
@@ -135,7 +137,8 @@ type StdLogger interface {
Panicln(...interface{})
}
-// The FieldLogger interface generalizes the Entry and Logger types
+// FieldLogger extends the [StdLogger] interface, generalizing
+// the [Entry] and [Logger] types.
type FieldLogger interface {
WithField(key string, value interface{}) *Entry
WithFields(fields Fields) *Entry
@@ -176,8 +179,9 @@ type FieldLogger interface {
// IsPanicEnabled() bool
}
-// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
-// here for consistancy. Do not use. Use Logger or Entry instead.
+// Ext1FieldLogger (the first extension to [FieldLogger]) is superfluous; it is
+// here for consistency. Do not use. Use [FieldLogger], [Logger] or [Entry]
+// instead.
type Ext1FieldLogger interface {
FieldLogger
Tracef(format string, args ...interface{})
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
index be2c6efe5ed..6dfeb18b10e 100644
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -306,6 +306,7 @@ func (f *TextFormatter) needsQuoting(text string) bool {
return false
}
for _, ch := range text {
+ //nolint:staticcheck // QF1001: could apply De Morgan's law
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
@@ -334,6 +335,6 @@ func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
if !f.needsQuoting(stringVal) {
b.WriteString(stringVal)
} else {
- b.WriteString(fmt.Sprintf("%q", stringVal))
+ fmt.Fprintf(b, "%q", stringVal)
}
}
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE
new file mode 100644
index 00000000000..85541be2e1b
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2024 The Update Framework Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE
new file mode 100644
index 00000000000..09005219963
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/NOTICE
@@ -0,0 +1,9 @@
+Copyright 2024 The Update Framework Authors
+
+Apache 2.0 License
+Copyright 2024 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go
new file mode 100644
index 00000000000..2123e57dfc3
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/config/config.go
@@ -0,0 +1,144 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package config
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/cenkalti/backoff/v5"
+ "github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
+)
+
+type UpdaterConfig struct {
+ // TUF configuration
+ MaxRootRotations int64
+ MaxDelegations int
+ RootMaxLength int64
+ TimestampMaxLength int64
+ SnapshotMaxLength int64
+ TargetsMaxLength int64
+ // Updater configuration
+ Fetcher fetcher.Fetcher
+ LocalTrustedRoot []byte
+ LocalMetadataDir string
+ LocalTargetsDir string
+ RemoteMetadataURL string
+ RemoteTargetsURL string
+ DisableLocalCache bool
+ PrefixTargetsWithHash bool
+ // UnsafeLocalMode only uses the metadata as written on disk.
+ // If the metadata is incomplete, calling updater.Refresh will fail.
+ UnsafeLocalMode bool
+}
+
+// New creates a new UpdaterConfig instance used by the Updater to
+// store configuration
+func New(remoteURL string, rootBytes []byte) (*UpdaterConfig, error) {
+ // Default URL for target files - /targets
+ targetsURL, err := url.JoinPath(remoteURL, "targets")
+ if err != nil {
+ return nil, err
+ }
+
+ return &UpdaterConfig{
+ // TUF configuration
+ MaxRootRotations: 256,
+ MaxDelegations: 32,
+ RootMaxLength: 512000, // bytes
+ TimestampMaxLength: 16384, // bytes
+ SnapshotMaxLength: 2000000, // bytes
+ TargetsMaxLength: 5000000, // bytes
+ // Updater configuration
+ Fetcher: fetcher.NewDefaultFetcher(), // use the default built-in download fetcher
+ LocalTrustedRoot: rootBytes, // trusted root.json
+ RemoteMetadataURL: remoteURL, // URL of where the TUF metadata is
+ RemoteTargetsURL: targetsURL, // URL of where the target files should be downloaded from
+ DisableLocalCache: false, // enable local caching of trusted metadata
+ PrefixTargetsWithHash: true, // use hash-prefixed target files with consistent snapshots
+ UnsafeLocalMode: false,
+ }, nil
+}
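+
+// Illustrative use (remoteURL and rootBytes are assumed to come from the
+// caller; the cache directories are hypothetical):
+//
+//	cfg, err := New(remoteURL, rootBytes)
+//	if err != nil {
+//		// handle error
+//	}
+//	cfg.LocalMetadataDir = "/tmp/tuf/metadata"
+//	cfg.LocalTargetsDir = "/tmp/tuf/targets"
+//	err = cfg.EnsurePathsExist()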
+
+func (cfg *UpdaterConfig) EnsurePathsExist() error {
+ if cfg.DisableLocalCache {
+ return nil
+ }
+
+ for _, path := range []string{cfg.LocalMetadataDir, cfg.LocalTargetsDir} {
+ if err := os.MkdirAll(path, os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (cfg *UpdaterConfig) SetDefaultFetcherHTTPClient(client *http.Client) error {
+ // Check if the configured fetcher is the default fetcher
+ // since we are only configuring a http.Client value for the default fetcher
+ df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher)
+ if !ok {
+ return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher")
+ }
+ df.SetHTTPClient(client)
+ cfg.Fetcher = df
+ return nil
+}
+
+func (cfg *UpdaterConfig) SetDefaultFetcherTransport(rt http.RoundTripper) error {
+ // Check if the configured fetcher is the default fetcher
+ // since we are only configuring a Transport value for the default fetcher
+ df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher)
+ if !ok {
+ return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher")
+ }
+ if err := df.SetTransport(rt); err != nil {
+ return err
+ }
+ cfg.Fetcher = df
+ return nil
+}
+
+// SetDefaultFetcherRetry sets the constant retry interval and count for the default fetcher
+func (cfg *UpdaterConfig) SetDefaultFetcherRetry(retryInterval time.Duration, retryCount uint) error {
+ // Check if the configured fetcher is the default fetcher
+ // since we are only configuring the retry interval and count for the default fetcher
+ df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher)
+ if !ok {
+ return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher")
+ }
+ df.SetRetry(retryInterval, retryCount)
+ cfg.Fetcher = df
+ return nil
+}
+
+func (cfg *UpdaterConfig) SetRetryOptions(retryOptions ...backoff.RetryOption) error {
+ // Check if the configured fetcher is the default fetcher
+ // since we are only configuring retry options for the default fetcher
+ df, ok := cfg.Fetcher.(*fetcher.DefaultFetcher)
+ if !ok {
+ return fmt.Errorf("fetcher is not type fetcher.DefaultFetcher")
+ }
+ df.SetRetryOptions(retryOptions...)
+ cfg.Fetcher = df
+ return nil
+}
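+
+// The Set* helpers above only apply to the built-in fetcher. As a sketch,
+// tightening the HTTP timeout on the default fetcher might look like this
+// (the client settings are illustrative):
+//
+//	if err := cfg.SetDefaultFetcherHTTPClient(&http.Client{Timeout: 30 * time.Second}); err != nil {
+//		// cfg.Fetcher was replaced with a custom implementation
+//	}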
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go
new file mode 100644
index 00000000000..3bd8e5ea718
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/errors.go
@@ -0,0 +1,246 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package metadata
+
+import (
+ "fmt"
+)
+
+// Define TUF error types used inside the new modern implementation.
+// The names chosen for TUF error types should start with 'Err' except where
+// there is a good reason not to; in those cases, the reason should be documented.
+
+// Repository errors
+
+// ErrRepository - an error with a repository's state, such as a missing file.
+// It covers all errors that come from the repository side when viewed
+// from the perspective of users of the metadata API or client.
+type ErrRepository struct {
+ Msg string
+}
+
+func (e *ErrRepository) Error() string {
+ return fmt.Sprintf("repository error: %s", e.Msg)
+}
+
+func (e *ErrRepository) Is(target error) bool {
+ _, ok := target.(*ErrRepository)
+ return ok
+}
+
+// ErrUnsignedMetadata - An error about metadata object with insufficient threshold of signatures
+type ErrUnsignedMetadata struct {
+ Msg string
+}
+
+func (e *ErrUnsignedMetadata) Error() string {
+ return fmt.Sprintf("unsigned metadata error: %s", e.Msg)
+}
+
+// ErrUnsignedMetadata is a subset of ErrRepository
+func (e *ErrUnsignedMetadata) Is(target error) bool {
+ if _, ok := target.(*ErrUnsignedMetadata); ok {
+ return true
+ }
+ if _, ok := target.(*ErrRepository); ok {
+ return true
+ }
+ return false
+}
+
+// ErrBadVersionNumber - An error for metadata that contains an invalid version number
+type ErrBadVersionNumber struct {
+ Msg string
+}
+
+func (e *ErrBadVersionNumber) Error() string {
+ return fmt.Sprintf("bad version number error: %s", e.Msg)
+}
+
+// ErrBadVersionNumber is a subset of ErrRepository
+func (e *ErrBadVersionNumber) Is(target error) bool {
+ if _, ok := target.(*ErrBadVersionNumber); ok {
+ return true
+ }
+ if _, ok := target.(*ErrRepository); ok {
+ return true
+ }
+ return false
+}
+
+// ErrEqualVersionNumber - An error for metadata containing a previously verified version number
+type ErrEqualVersionNumber struct {
+ Msg string
+}
+
+func (e *ErrEqualVersionNumber) Error() string {
+ return fmt.Sprintf("equal version number error: %s", e.Msg)
+}
+
+// ErrEqualVersionNumber is a subset of both ErrRepository and ErrBadVersionNumber
+func (e *ErrEqualVersionNumber) Is(target error) bool {
+ if _, ok := target.(*ErrEqualVersionNumber); ok {
+ return true
+ }
+ if _, ok := target.(*ErrBadVersionNumber); ok {
+ return true
+ }
+ if _, ok := target.(*ErrRepository); ok {
+ return true
+ }
+ return false
+}
+
+// ErrExpiredMetadata - Indicate that a TUF Metadata file has expired
+type ErrExpiredMetadata struct {
+ Msg string
+}
+
+func (e *ErrExpiredMetadata) Error() string {
+ return fmt.Sprintf("expired metadata error: %s", e.Msg)
+}
+
+// ErrExpiredMetadata is a subset of ErrRepository
+func (e *ErrExpiredMetadata) Is(target error) bool {
+ if _, ok := target.(*ErrExpiredMetadata); ok {
+ return true
+ }
+ if _, ok := target.(*ErrRepository); ok {
+ return true
+ }
+ return false
+}
+
+// ErrLengthOrHashMismatch - An error while checking the length and hash values of an object
+type ErrLengthOrHashMismatch struct {
+ Msg string
+}
+
+func (e *ErrLengthOrHashMismatch) Error() string {
+ return fmt.Sprintf("length/hash verification error: %s", e.Msg)
+}
+
+// ErrLengthOrHashMismatch is a subset of ErrRepository
+func (e *ErrLengthOrHashMismatch) Is(target error) bool {
+ if _, ok := target.(*ErrLengthOrHashMismatch); ok {
+ return true
+ }
+ if _, ok := target.(*ErrRepository); ok {
+ return true
+ }
+ return false
+}
+
+// Download errors
+
+// ErrDownload - An error occurred while attempting to download a file
+type ErrDownload struct {
+ Msg string
+}
+
+func (e *ErrDownload) Error() string {
+ return fmt.Sprintf("download error: %s", e.Msg)
+}
+
+func (e *ErrDownload) Is(target error) bool {
+ _, ok := target.(*ErrDownload)
+ return ok
+}
+
+// ErrDownloadLengthMismatch - Indicate that a mismatch of lengths was seen while downloading a file
+type ErrDownloadLengthMismatch struct {
+ Msg string
+}
+
+func (e *ErrDownloadLengthMismatch) Error() string {
+ return fmt.Sprintf("download length mismatch error: %s", e.Msg)
+}
+
+// ErrDownloadLengthMismatch is a subset of ErrDownload
+func (e *ErrDownloadLengthMismatch) Is(target error) bool {
+ if _, ok := target.(*ErrDownloadLengthMismatch); ok {
+ return true
+ }
+ if _, ok := target.(*ErrDownload); ok {
+ return true
+ }
+ return false
+}
+
+// ErrDownloadHTTP - Returned by Fetcher interface implementations for HTTP errors
+type ErrDownloadHTTP struct {
+ StatusCode int
+ URL string
+}
+
+func (e *ErrDownloadHTTP) Error() string {
+ return fmt.Sprintf("failed to download %s, http status code: %d", e.URL, e.StatusCode)
+}
+
+// ErrDownloadHTTP is a subset of ErrDownload
+func (e *ErrDownloadHTTP) Is(target error) bool {
+ if _, ok := target.(*ErrDownloadHTTP); ok {
+ return true
+ }
+ if _, ok := target.(*ErrDownload); ok {
+ return true
+ }
+ return false
+}
+
+// ErrValue - an error for an invalid value (the equivalent of a ValueError)
+type ErrValue struct {
+ Msg string
+}
+
+func (e *ErrValue) Error() string {
+ return fmt.Sprintf("value error: %s", e.Msg)
+}
+
+func (e *ErrValue) Is(err error) bool {
+ _, ok := err.(*ErrValue)
+ return ok
+}
+
+// ErrType - an error for an invalid type (the equivalent of a TypeError)
+type ErrType struct {
+ Msg string
+}
+
+func (e *ErrType) Error() string {
+ return fmt.Sprintf("type error: %s", e.Msg)
+}
+
+func (e *ErrType) Is(err error) bool {
+ _, ok := err.(*ErrType)
+ return ok
+}
+
+// ErrRuntime - a generic runtime error (the equivalent of a RuntimeError)
+type ErrRuntime struct {
+ Msg string
+}
+
+func (e *ErrRuntime) Error() string {
+ return fmt.Sprintf("runtime error: %s", e.Msg)
+}
+
+func (e *ErrRuntime) Is(err error) bool {
+ _, ok := err.(*ErrRuntime)
+ return ok
+}
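+
+// Illustrative sketch (not part of the vendored upstream source): because
+// each Is method above also matches its broader category, a concrete error
+// can be tested against either level with the standard errors.Is:
+//
+//	err := &ErrExpiredMetadata{Msg: "root.json expired"}
+//	errors.Is(err, &ErrExpiredMetadata{}) // true
+//	errors.Is(err, &ErrRepository{})      // true: subset of repository errors
+//	errors.Is(err, &ErrDownload{})        // false: unrelated category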
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go
new file mode 100644
index 00000000000..71796b7891d
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/fetcher/fetcher.go
@@ -0,0 +1,172 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package fetcher
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/cenkalti/backoff/v5"
+ "github.com/theupdateframework/go-tuf/v2/metadata"
+)
+
+// httpClient interface allows us to either provide a live http.Client
+// or a mock implementation for testing purposes
+type httpClient interface {
+ Do(req *http.Request) (*http.Response, error)
+}
+
+// Fetcher interface
+type Fetcher interface {
+ // DownloadFile downloads a file from the provided URL, reading
+ // up to maxLength of bytes before it aborts.
+ // The timeout argument is deprecated and not used. To configure
+ // the timeout (or retries), modify the fetcher instead. For the
+ // DefaultFetcher the underlying HTTP client can be substituted.
+ DownloadFile(urlPath string, maxLength int64, _ time.Duration) ([]byte, error)
+}
+
+// DefaultFetcher implements Fetcher
+type DefaultFetcher struct {
+ // httpClient configuration
+ httpUserAgent string
+ client httpClient
+ // retry logic configuration
+ retryOptions []backoff.RetryOption
+}
+
+// SetHTTPUserAgent sets the User-Agent header sent with each request.
+func (d *DefaultFetcher) SetHTTPUserAgent(httpUserAgent string) {
+ d.httpUserAgent = httpUserAgent
+}
+
+// DownloadFile downloads a file from urlPath, erroring out if the request
+// fails or if the response is longer than maxLength. The trailing
+// time.Duration argument is deprecated and ignored.
+func (d *DefaultFetcher) DownloadFile(urlPath string, maxLength int64, _ time.Duration) ([]byte, error) {
+ req, err := http.NewRequest("GET", urlPath, nil)
+ if err != nil {
+ return nil, err
+ }
+ // Set a custom User-Agent if one was configured (useful for distinguishing multiple sessions).
+ if d.httpUserAgent != "" {
+ req.Header.Set("User-Agent", d.httpUserAgent)
+ }
+
+ // For backwards compatibility, if the client is nil, use the default client.
+ if d.client == nil {
+ d.client = http.DefaultClient
+ }
+
+ operation := func() ([]byte, error) {
+ // Execute the request.
+ res, err := d.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ // Handle HTTP status codes.
+ if res.StatusCode != http.StatusOK {
+ return nil, &metadata.ErrDownloadHTTP{StatusCode: res.StatusCode, URL: urlPath}
+ }
+ var length int64
+ // Get content length from header (might not be accurate, -1 or not set).
+ if header := res.Header.Get("Content-Length"); header != "" {
+ length, err = strconv.ParseInt(header, 10, 0)
+ if err != nil {
+ return nil, err
+ }
+ // Error if the reported size is greater than what is expected.
+ if length > maxLength {
+ return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)}
+ }
+ }
+ // Although the size has been checked above, use a LimitReader in case
+ // the reported size is inaccurate, or size is -1 which indicates an
+ // unknown length. We read maxLength + 1 in order to check if the read data
+ // surpassed our set limit.
+ data, err := io.ReadAll(io.LimitReader(res.Body, maxLength+1))
+ if err != nil {
+ return nil, err
+ }
+ // Error if the reported size is greater than what is expected.
+ length = int64(len(data))
+ if length > maxLength {
+ return nil, &metadata.ErrDownloadLengthMismatch{Msg: fmt.Sprintf("download failed for %s, length %d is larger than expected %d", urlPath, length, maxLength)}
+ }
+
+ return data, nil
+ }
+ data, err := backoff.Retry(context.Background(), operation, d.retryOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// NewDefaultFetcher creates a DefaultFetcher that uses http.DefaultClient
+// and attempts each HTTP request once.
+func NewDefaultFetcher() *DefaultFetcher {
+ return &DefaultFetcher{
+ client: http.DefaultClient,
+ // default to attempting the HTTP request once
+ retryOptions: []backoff.RetryOption{backoff.WithMaxTries(1)},
+ }
+}
+
+// NewFetcherWithHTTPClient creates a new DefaultFetcher with a custom httpClient
+func (f *DefaultFetcher) NewFetcherWithHTTPClient(hc httpClient) *DefaultFetcher {
+ return &DefaultFetcher{
+ client: hc,
+ }
+}
+
+// NewFetcherWithRoundTripper creates a new DefaultFetcher with a custom RoundTripper.
+// The function creates a fresh http.Client (rather than mutating the shared
+// http.DefaultClient) and sets its Transport to the provided RoundTripper implementation.
+func (f *DefaultFetcher) NewFetcherWithRoundTripper(rt http.RoundTripper) *DefaultFetcher {
+ client := &http.Client{Transport: rt}
+ return &DefaultFetcher{
+ client: client,
+ }
+}
+
+func (f *DefaultFetcher) SetHTTPClient(hc httpClient) {
+ f.client = hc
+}
+
+// SetTransport sets the Transport on the fetcher's underlying *http.Client.
+func (f *DefaultFetcher) SetTransport(rt http.RoundTripper) error {
+ hc, ok := f.client.(*http.Client)
+ if !ok {
+ return fmt.Errorf("client is not type *http.Client")
+ }
+ hc.Transport = rt
+ f.client = hc
+ return nil
+}
+
+// SetRetry configures the default fetcher to retry on a constant interval, at most retryCount times.
+func (f *DefaultFetcher) SetRetry(retryInterval time.Duration, retryCount uint) {
+ constantBackOff := backoff.WithBackOff(backoff.NewConstantBackOff(retryInterval))
+ maxTryCount := backoff.WithMaxTries(retryCount)
+ f.SetRetryOptions(constantBackOff, maxTryCount)
+}
+
+// SetRetryOptions sets the backoff retry options used by DownloadFile.
+func (f *DefaultFetcher) SetRetryOptions(retryOptions ...backoff.RetryOption) {
+ f.retryOptions = retryOptions
+}
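+
+// Illustrative usage (a minimal sketch, not part of the vendored upstream
+// source; the URL and sizes are placeholders):
+//
+//	f := NewDefaultFetcher()
+//	f.SetHTTPUserAgent("example-agent/1.0")
+//	f.SetRetry(2*time.Second, 3) // constant backoff, at most 3 tries
+//	// the trailing time.Duration argument is deprecated and ignored
+//	data, err := f.DownloadFile("https://example.com/metadata/root.json", 512000, 0)
+//	if err != nil {
+//		// e.g. *metadata.ErrDownloadHTTP or *metadata.ErrDownloadLengthMismatch
+//	}
+//	_ = data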
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go
new file mode 100644
index 00000000000..57e38612be1
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/keys.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package metadata
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/hex"
+ "fmt"
+
+ "github.com/secure-systems-lab/go-securesystemslib/cjson"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+const (
+ KeyTypeEd25519 = "ed25519"
+ KeyTypeECDSA_SHA2_P256_COMPAT = "ecdsa-sha2-nistp256"
+ KeyTypeECDSA_SHA2_P256 = "ecdsa"
+ KeyTypeRSASSA_PSS_SHA256 = "rsa"
+ KeySchemeEd25519 = "ed25519"
+ KeySchemeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256"
+ KeySchemeECDSA_SHA2_P384 = "ecdsa-sha2-nistp384"
+ KeySchemeRSASSA_PSS_SHA256 = "rsassa-pss-sha256"
+)
+
+// ToPublicKey generates a crypto.PublicKey from the metadata type Key
+func (k *Key) ToPublicKey() (crypto.PublicKey, error) {
+ switch k.Type {
+ case KeyTypeRSASSA_PSS_SHA256:
+ publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey))
+ if err != nil {
+ return nil, err
+ }
+ rsaKey, ok := publicKey.(*rsa.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("invalid rsa public key")
+ }
+ // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357
+ if _, err := x509.MarshalPKIXPublicKey(rsaKey); err != nil {
+ return nil, err
+ }
+ return rsaKey, nil
+ case KeyTypeECDSA_SHA2_P256, KeyTypeECDSA_SHA2_P256_COMPAT: // handle "ecdsa" too as python-tuf/sslib keys are using it for keytype instead of https://theupdateframework.github.io/specification/latest/index.html#keytype-ecdsa-sha2-nistp256
+ publicKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(k.Value.PublicKey))
+ if err != nil {
+ return nil, err
+ }
+ ecdsaKey, ok := publicKey.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("invalid ecdsa public key")
+ }
+ // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357
+ if _, err := x509.MarshalPKIXPublicKey(ecdsaKey); err != nil {
+ return nil, err
+ }
+ return ecdsaKey, nil
+ case KeyTypeEd25519:
+ publicKey, err := hex.DecodeString(k.Value.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ ed25519Key := ed25519.PublicKey(publicKey)
+ // done for verification - ref. https://github.com/theupdateframework/go-tuf/pull/357
+ if _, err := x509.MarshalPKIXPublicKey(ed25519Key); err != nil {
+ return nil, err
+ }
+ return ed25519Key, nil
+ }
+ return nil, fmt.Errorf("unsupported public key type")
+}
+
+// KeyFromPublicKey generates a metadata type Key from a crypto.PublicKey
+func KeyFromPublicKey(k crypto.PublicKey) (*Key, error) {
+ key := &Key{}
+ switch k := k.(type) {
+ case *rsa.PublicKey:
+ key.Type = KeyTypeRSASSA_PSS_SHA256
+ key.Scheme = KeySchemeRSASSA_PSS_SHA256
+ pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k)
+ if err != nil {
+ return nil, err
+ }
+ key.Value.PublicKey = string(pemKey)
+ case *ecdsa.PublicKey:
+ key.Type = KeyTypeECDSA_SHA2_P256
+ key.Scheme = KeySchemeECDSA_SHA2_P256
+ pemKey, err := cryptoutils.MarshalPublicKeyToPEM(k)
+ if err != nil {
+ return nil, err
+ }
+ key.Value.PublicKey = string(pemKey)
+ case ed25519.PublicKey:
+ key.Type = KeyTypeEd25519
+ key.Scheme = KeySchemeEd25519
+ key.Value.PublicKey = hex.EncodeToString(k)
+ default:
+ return nil, fmt.Errorf("unsupported public key type")
+ }
+ return key, nil
+}
+
+// ID returns the keyID value for the given Key
+func (k *Key) ID() string {
+ // the identifier is a hexdigest of the SHA-256 hash of the canonical form of the key
+ if k.id == "" {
+ data, err := cjson.EncodeCanonical(k)
+ if err != nil {
+ panic(fmt.Errorf("error creating key ID: %w", err))
+ }
+ digest := sha256.Sum256(data)
+ k.id = hex.EncodeToString(digest[:])
+ }
+ return k.id
+}
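+
+// Illustrative sketch (not part of the vendored upstream source), showing a
+// round trip between crypto.PublicKey and the TUF Key type, and how the
+// keyID is derived:
+//
+//	pub, _, err := ed25519.GenerateKey(rand.Reader) // crypto/ed25519, crypto/rand
+//	if err != nil { /* handle error */ }
+//	key, err := KeyFromPublicKey(pub)
+//	if err != nil { /* handle error */ }
+//	id := key.ID()                 // hex SHA-256 of the canonical key JSON
+//	back, err := key.ToPublicKey() // recovers an ed25519.PublicKey
+//	_, _ = back, id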
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go
new file mode 100644
index 00000000000..c7cab38389b
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/logger.go
@@ -0,0 +1,45 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package metadata
+
+var log Logger = DiscardLogger{}
+
+// Logger partially implements the go-log/logr's interface:
+// https://github.com/go-logr/logr/blob/master/logr.go
+type Logger interface {
+ // Info logs a non-error message with key/value pairs
+ Info(msg string, kv ...any)
+ // Error logs an error with a given message and key/value pairs.
+ Error(err error, msg string, kv ...any)
+}
+
+type DiscardLogger struct{}
+
+func (d DiscardLogger) Info(msg string, kv ...any) {
+}
+
+func (d DiscardLogger) Error(err error, msg string, kv ...any) {
+}
+
+func SetLogger(logger Logger) {
+ log = logger
+}
+
+func GetLogger() Logger {
+ return log
+}
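+
+// Illustrative sketch (not part of the vendored upstream source): any type
+// with matching Info/Error methods satisfies Logger, e.g. a trivial logger
+// built on the standard library (stdlog aliases the stdlib "log" package,
+// to avoid clashing with this package's log variable):
+//
+//	type stderrLogger struct{}
+//
+//	func (stderrLogger) Info(msg string, kv ...any) {
+//		stdlog.Println(append([]any{msg}, kv...)...)
+//	}
+//
+//	func (stderrLogger) Error(err error, msg string, kv ...any) {
+//		stdlog.Println(append([]any{"error:", err, msg}, kv...)...)
+//	}
+//
+//	metadata.SetLogger(stderrLogger{})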
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go
new file mode 100644
index 00000000000..bd3f1e44bdc
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/marshal.go
@@ -0,0 +1,567 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package metadata
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+)
+
+// The following marshal/unmarshal methods override the default behavior for each TUF type
+// in order to support unrecognized fields
+
+func (signed RootType) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(signed.UnrecognizedFields) != 0 {
+ copyMapValues(signed.UnrecognizedFields, dict)
+ }
+ dict["_type"] = signed.Type
+ dict["spec_version"] = signed.SpecVersion
+ dict["consistent_snapshot"] = signed.ConsistentSnapshot
+ dict["version"] = signed.Version
+ dict["expires"] = signed.Expires
+ dict["keys"] = signed.Keys
+ dict["roles"] = signed.Roles
+ return json.Marshal(dict)
+}
+
+func (signed *RootType) UnmarshalJSON(data []byte) error {
+ type Alias RootType
+ var s Alias
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ *signed = RootType(s)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "_type")
+ delete(dict, "spec_version")
+ delete(dict, "consistent_snapshot")
+ delete(dict, "version")
+ delete(dict, "expires")
+ delete(dict, "keys")
+ delete(dict, "roles")
+ signed.UnrecognizedFields = dict
+ return nil
+}
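+
+// Illustrative sketch (not part of the vendored upstream source;
+// rootJSONWithCustomField is a placeholder []byte whose required fields are
+// elided for brevity): the pattern above, repeated for the remaining types
+// in this file, preserves unknown JSON fields across a round trip:
+//
+//	var r RootType
+//	_ = json.Unmarshal(rootJSONWithCustomField, &r) // unknown keys land in r.UnrecognizedFields
+//	out, _ := json.Marshal(r)                       // and are written back out here
+//	_ = out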
+
+func (signed SnapshotType) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(signed.UnrecognizedFields) != 0 {
+ copyMapValues(signed.UnrecognizedFields, dict)
+ }
+ dict["_type"] = signed.Type
+ dict["spec_version"] = signed.SpecVersion
+ dict["version"] = signed.Version
+ dict["expires"] = signed.Expires
+ dict["meta"] = signed.Meta
+ return json.Marshal(dict)
+}
+
+func (signed *SnapshotType) UnmarshalJSON(data []byte) error {
+ type Alias SnapshotType
+ var s Alias
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ *signed = SnapshotType(s)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "_type")
+ delete(dict, "spec_version")
+ delete(dict, "version")
+ delete(dict, "expires")
+ delete(dict, "meta")
+ signed.UnrecognizedFields = dict
+ return nil
+}
+
+func (signed TimestampType) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(signed.UnrecognizedFields) != 0 {
+ copyMapValues(signed.UnrecognizedFields, dict)
+ }
+ dict["_type"] = signed.Type
+ dict["spec_version"] = signed.SpecVersion
+ dict["version"] = signed.Version
+ dict["expires"] = signed.Expires
+ dict["meta"] = signed.Meta
+ return json.Marshal(dict)
+}
+
+func (signed *TimestampType) UnmarshalJSON(data []byte) error {
+ type Alias TimestampType
+ var s Alias
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ *signed = TimestampType(s)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "_type")
+ delete(dict, "spec_version")
+ delete(dict, "version")
+ delete(dict, "expires")
+ delete(dict, "meta")
+ signed.UnrecognizedFields = dict
+ return nil
+}
+
+func (signed TargetsType) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(signed.UnrecognizedFields) != 0 {
+ copyMapValues(signed.UnrecognizedFields, dict)
+ }
+ dict["_type"] = signed.Type
+ dict["spec_version"] = signed.SpecVersion
+ dict["version"] = signed.Version
+ dict["expires"] = signed.Expires
+ dict["targets"] = signed.Targets
+ if signed.Delegations != nil {
+ dict["delegations"] = signed.Delegations
+ }
+ return json.Marshal(dict)
+}
+
+func (signed *TargetsType) UnmarshalJSON(data []byte) error {
+ type Alias TargetsType
+ var s Alias
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ *signed = TargetsType(s)
+
+ // populate the path field for each target
+ for name, targetFile := range signed.Targets {
+ targetFile.Path = name
+ }
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "_type")
+ delete(dict, "spec_version")
+ delete(dict, "version")
+ delete(dict, "expires")
+ delete(dict, "targets")
+ delete(dict, "delegations")
+ signed.UnrecognizedFields = dict
+ return nil
+}
+
+func (signed MetaFiles) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(signed.UnrecognizedFields) != 0 {
+ copyMapValues(signed.UnrecognizedFields, dict)
+ }
+ // length and hashes are optional
+ if signed.Length != 0 {
+ dict["length"] = signed.Length
+ }
+ if len(signed.Hashes) != 0 {
+ dict["hashes"] = signed.Hashes
+ }
+ dict["version"] = signed.Version
+ return json.Marshal(dict)
+}
+
+func (signed *MetaFiles) UnmarshalJSON(data []byte) error {
+ type Alias MetaFiles
+ var s Alias
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ *signed = MetaFiles(s)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "length")
+ delete(dict, "hashes")
+ delete(dict, "version")
+ signed.UnrecognizedFields = dict
+ return nil
+}
+
+func (signed TargetFiles) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(signed.UnrecognizedFields) != 0 {
+ copyMapValues(signed.UnrecognizedFields, dict)
+ }
+ dict["length"] = signed.Length
+ dict["hashes"] = signed.Hashes
+ if signed.Custom != nil {
+ dict["custom"] = signed.Custom
+ }
+ return json.Marshal(dict)
+}
+
+func (signed *TargetFiles) UnmarshalJSON(data []byte) error {
+ type Alias TargetFiles
+ var s Alias
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ *signed = TargetFiles(s)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "length")
+ delete(dict, "hashes")
+ delete(dict, "custom")
+ signed.UnrecognizedFields = dict
+ return nil
+}
+
+func (key Key) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(key.UnrecognizedFields) != 0 {
+ copyMapValues(key.UnrecognizedFields, dict)
+ }
+ dict["keytype"] = key.Type
+ dict["scheme"] = key.Scheme
+ dict["keyval"] = key.Value
+ return json.Marshal(dict)
+}
+
+func (key *Key) UnmarshalJSON(data []byte) error {
+ type Alias Key
+ var a Alias
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ // nolint
+ *key = Key(a)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "keytype")
+ delete(dict, "scheme")
+ delete(dict, "keyval")
+ key.UnrecognizedFields = dict
+ return nil
+}
+
+func (meta Metadata[T]) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(meta.UnrecognizedFields) != 0 {
+ copyMapValues(meta.UnrecognizedFields, dict)
+ }
+ dict["signed"] = meta.Signed
+ dict["signatures"] = meta.Signatures
+ return json.Marshal(dict)
+}
+
+func (meta *Metadata[T]) UnmarshalJSON(data []byte) error {
+ tmp := any(new(T))
+ var m map[string]any
+ if err := json.Unmarshal(data, &m); err != nil {
+ return err
+ }
+ switch tmp.(type) {
+ case *RootType:
+ dict := struct {
+ Signed RootType `json:"signed"`
+ Signatures []Signature `json:"signatures"`
+ }{}
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ var i interface{} = dict.Signed
+ meta.Signed = i.(T)
+ meta.Signatures = dict.Signatures
+ case *SnapshotType:
+ dict := struct {
+ Signed SnapshotType `json:"signed"`
+ Signatures []Signature `json:"signatures"`
+ }{}
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ var i interface{} = dict.Signed
+ meta.Signed = i.(T)
+ meta.Signatures = dict.Signatures
+ case *TimestampType:
+ dict := struct {
+ Signed TimestampType `json:"signed"`
+ Signatures []Signature `json:"signatures"`
+ }{}
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ var i interface{} = dict.Signed
+ meta.Signed = i.(T)
+ meta.Signatures = dict.Signatures
+ case *TargetsType:
+ dict := struct {
+ Signed TargetsType `json:"signed"`
+ Signatures []Signature `json:"signatures"`
+ }{}
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ var i interface{} = dict.Signed
+ meta.Signed = i.(T)
+ meta.Signatures = dict.Signatures
+ default:
+ return &ErrValue{Msg: "unrecognized metadata type"}
+ }
+ delete(m, "signed")
+ delete(m, "signatures")
+ meta.UnrecognizedFields = m
+ return nil
+}
+
+func (s Signature) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(s.UnrecognizedFields) != 0 {
+ copyMapValues(s.UnrecognizedFields, dict)
+ }
+ dict["keyid"] = s.KeyID
+ dict["sig"] = s.Signature
+ return json.Marshal(dict)
+}
+
+func (s *Signature) UnmarshalJSON(data []byte) error {
+ type Alias Signature
+ var a Alias
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ *s = Signature(a)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "keyid")
+ delete(dict, "sig")
+ s.UnrecognizedFields = dict
+ return nil
+}
+
+func (kv KeyVal) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(kv.UnrecognizedFields) != 0 {
+ copyMapValues(kv.UnrecognizedFields, dict)
+ }
+ dict["public"] = kv.PublicKey
+ return json.Marshal(dict)
+}
+
+func (kv *KeyVal) UnmarshalJSON(data []byte) error {
+ type Alias KeyVal
+ var a Alias
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ *kv = KeyVal(a)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "public")
+ kv.UnrecognizedFields = dict
+ return nil
+}
+
+func (role Role) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(role.UnrecognizedFields) != 0 {
+ copyMapValues(role.UnrecognizedFields, dict)
+ }
+ dict["keyids"] = role.KeyIDs
+ dict["threshold"] = role.Threshold
+ return json.Marshal(dict)
+}
+
+func (role *Role) UnmarshalJSON(data []byte) error {
+ type Alias Role
+ var a Alias
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ *role = Role(a)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "keyids")
+ delete(dict, "threshold")
+ role.UnrecognizedFields = dict
+ return nil
+}
+
+func (d Delegations) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(d.UnrecognizedFields) != 0 {
+ copyMapValues(d.UnrecognizedFields, dict)
+ }
+ // only one is allowed
+ dict["keys"] = d.Keys
+ if d.Roles != nil {
+ dict["roles"] = d.Roles
+ } else if d.SuccinctRoles != nil {
+ dict["succinct_roles"] = d.SuccinctRoles
+ }
+ return json.Marshal(dict)
+}
+
+func (d *Delegations) UnmarshalJSON(data []byte) error {
+ type Alias Delegations
+ var a Alias
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ *d = Delegations(a)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "keys")
+ delete(dict, "roles")
+ delete(dict, "succinct_roles")
+ d.UnrecognizedFields = dict
+ return nil
+}
+
+func (role DelegatedRole) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(role.UnrecognizedFields) != 0 {
+ copyMapValues(role.UnrecognizedFields, dict)
+ }
+ dict["name"] = role.Name
+ dict["keyids"] = role.KeyIDs
+ dict["threshold"] = role.Threshold
+ dict["terminating"] = role.Terminating
+ // make sure we have only one of the two (per spec)
+ if role.Paths != nil && role.PathHashPrefixes != nil {
+ return nil, &ErrValue{Msg: "failed to marshal: not allowed to have both \"paths\" and \"path_hash_prefixes\" present"}
+ }
+ if role.Paths != nil {
+ dict["paths"] = role.Paths
+ } else if role.PathHashPrefixes != nil {
+ dict["path_hash_prefixes"] = role.PathHashPrefixes
+ }
+ return json.Marshal(dict)
+}
+
+func (role *DelegatedRole) UnmarshalJSON(data []byte) error {
+ type Alias DelegatedRole
+ var a Alias
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ *role = DelegatedRole(a)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "name")
+ delete(dict, "keyids")
+ delete(dict, "threshold")
+ delete(dict, "terminating")
+ delete(dict, "paths")
+ delete(dict, "path_hash_prefixes")
+ role.UnrecognizedFields = dict
+ return nil
+}
+
+func (role SuccinctRoles) MarshalJSON() ([]byte, error) {
+ dict := map[string]any{}
+ if len(role.UnrecognizedFields) != 0 {
+ copyMapValues(role.UnrecognizedFields, dict)
+ }
+ dict["keyids"] = role.KeyIDs
+ dict["threshold"] = role.Threshold
+ dict["bit_length"] = role.BitLength
+ dict["name_prefix"] = role.NamePrefix
+ return json.Marshal(dict)
+}
+
+func (role *SuccinctRoles) UnmarshalJSON(data []byte) error {
+ type Alias SuccinctRoles
+ var a Alias
+ if err := json.Unmarshal(data, &a); err != nil {
+ return err
+ }
+ *role = SuccinctRoles(a)
+
+ var dict map[string]any
+ if err := json.Unmarshal(data, &dict); err != nil {
+ return err
+ }
+ delete(dict, "keyids")
+ delete(dict, "threshold")
+ delete(dict, "bit_length")
+ delete(dict, "name_prefix")
+ role.UnrecognizedFields = dict
+ return nil
+}
+
+func (b *HexBytes) UnmarshalJSON(data []byte) error {
+ if len(data) < 2 || len(data)%2 != 0 || data[0] != '"' || data[len(data)-1] != '"' {
+ return errors.New("tuf: invalid JSON hex bytes")
+ }
+ res := make([]byte, hex.DecodedLen(len(data)-2))
+ _, err := hex.Decode(res, data[1:len(data)-1])
+ if err != nil {
+ return err
+ }
+ *b = res
+ return nil
+}
+
+func (b HexBytes) MarshalJSON() ([]byte, error) {
+ res := make([]byte, hex.EncodedLen(len(b))+2)
+ res[0] = '"'
+ res[len(res)-1] = '"'
+ hex.Encode(res[1:], b)
+ return res, nil
+}
+
+func (b HexBytes) String() string {
+ return hex.EncodeToString(b)
+}
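+
+// Illustrative sketch (not part of the vendored upstream source): HexBytes
+// serializes to and from a quoted hex string:
+//
+//	b := HexBytes{0xde, 0xad}
+//	out, _ := json.Marshal(b) // `"dead"`
+//	var back HexBytes
+//	_ = json.Unmarshal([]byte(`"dead"`), &back) // back equals b
+//	_ = out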
+
+// copyMapValues copies the values of the src map to dst
+func copyMapValues(src, dst map[string]any) {
+ for k, v := range src {
+ dst[k] = v
+ }
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go
new file mode 100644
index 00000000000..0d0afd850ee
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/metadata.go
@@ -0,0 +1,926 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package metadata
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/hmac"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/secure-systems-lab/go-securesystemslib/cjson"
+ "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// Root returns a new metadata instance of type Root
+func Root(expires ...time.Time) *Metadata[RootType] {
+ // expire now if there's nothing set
+ if len(expires) == 0 {
+ expires = []time.Time{time.Now().UTC()}
+ }
+ // populate Roles
+ roles := map[string]*Role{}
+ for _, r := range []string{ROOT, SNAPSHOT, TARGETS, TIMESTAMP} {
+ roles[r] = &Role{
+ KeyIDs: []string{},
+ Threshold: 1,
+ }
+ }
+ log.Info("Created metadata", "type", ROOT)
+ return &Metadata[RootType]{
+ Signed: RootType{
+ Type: ROOT,
+ SpecVersion: SPECIFICATION_VERSION,
+ Version: 1,
+ Expires: expires[0],
+ Keys: map[string]*Key{},
+ Roles: roles,
+ ConsistentSnapshot: true,
+ },
+ Signatures: []Signature{},
+ }
+}
+
+// Snapshot returns a new metadata instance of type Snapshot
+func Snapshot(expires ...time.Time) *Metadata[SnapshotType] {
+ // expire now if there's nothing set
+ if len(expires) == 0 {
+ expires = []time.Time{time.Now().UTC()}
+ }
+ log.Info("Created metadata", "type", SNAPSHOT)
+ return &Metadata[SnapshotType]{
+ Signed: SnapshotType{
+ Type: SNAPSHOT,
+ SpecVersion: SPECIFICATION_VERSION,
+ Version: 1,
+ Expires: expires[0],
+ Meta: map[string]*MetaFiles{
+ "targets.json": {
+ Version: 1,
+ },
+ },
+ },
+ Signatures: []Signature{},
+ }
+}
+
+// Timestamp returns a new metadata instance of type Timestamp
+func Timestamp(expires ...time.Time) *Metadata[TimestampType] {
+ // expire now if there's nothing set
+ if len(expires) == 0 {
+ expires = []time.Time{time.Now().UTC()}
+ }
+ log.Info("Created metadata", "type", TIMESTAMP)
+ return &Metadata[TimestampType]{
+ Signed: TimestampType{
+ Type: TIMESTAMP,
+ SpecVersion: SPECIFICATION_VERSION,
+ Version: 1,
+ Expires: expires[0],
+ Meta: map[string]*MetaFiles{
+ "snapshot.json": {
+ Version: 1,
+ },
+ },
+ },
+ Signatures: []Signature{},
+ }
+}
+
+// Targets returns a new metadata instance of type Targets
+func Targets(expires ...time.Time) *Metadata[TargetsType] {
+ // expire now if there's nothing set
+ if len(expires) == 0 {
+ expires = []time.Time{time.Now().UTC()}
+ }
+ log.Info("Created metadata", "type", TARGETS)
+ return &Metadata[TargetsType]{
+ Signed: TargetsType{
+ Type: TARGETS,
+ SpecVersion: SPECIFICATION_VERSION,
+ Version: 1,
+ Expires: expires[0],
+ Targets: map[string]*TargetFiles{},
+ },
+ Signatures: []Signature{},
+ }
+}
+
+// TargetFile returns a new metadata instance of type TargetFiles
+func TargetFile() *TargetFiles {
+ return &TargetFiles{
+ Length: 0,
+ Hashes: Hashes{},
+ }
+}
+
+// MetaFile returns a new metadata instance of type MetaFiles
+func MetaFile(version int64) *MetaFiles {
+ if version < 1 {
+ // attempting to set incorrect version
+ log.Info("Attempting to set incorrect version for MetaFile", "version", version)
+ version = 1
+ }
+ return &MetaFiles{
+ Length: 0,
+ Hashes: Hashes{},
+ Version: version,
+ }
+}
+
+// FromFile loads metadata from a file
+func (meta *Metadata[T]) FromFile(name string) (*Metadata[T], error) {
+ data, err := os.ReadFile(name)
+ if err != nil {
+ return nil, err
+ }
+ m, err := fromBytes[T](data)
+ if err != nil {
+ return nil, err
+ }
+ *meta = *m
+ log.Info("Loaded metadata from file", "name", name)
+ return meta, nil
+}
+
+// FromBytes deserializes metadata from bytes
+func (meta *Metadata[T]) FromBytes(data []byte) (*Metadata[T], error) {
+ m, err := fromBytes[T](data)
+ if err != nil {
+ return nil, err
+ }
+ *meta = *m
+ log.Info("Loaded metadata from bytes")
+ return meta, nil
+}
+
+// ToBytes serializes metadata to bytes
+func (meta *Metadata[T]) ToBytes(pretty bool) ([]byte, error) {
+ log.Info("Writing metadata to bytes")
+ if pretty {
+ return json.MarshalIndent(*meta, "", "\t")
+ }
+ return json.Marshal(*meta)
+}
+
+// ToFile saves metadata to a file
+func (meta *Metadata[T]) ToFile(name string, pretty bool) error {
+ log.Info("Writing metadata to file", "name", name)
+ data, err := meta.ToBytes(pretty)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(name, data, 0644)
+}
+
+// Sign creates a signature over Signed and appends it to Signatures
+func (meta *Metadata[T]) Sign(signer signature.Signer) (*Signature, error) {
+ // encode the Signed part to canonical JSON so signatures are consistent
+ payload, err := cjson.EncodeCanonical(meta.Signed)
+ if err != nil {
+ return nil, err
+ }
+ // sign the Signed part
+ sb, err := signer.SignMessage(bytes.NewReader(payload))
+ if err != nil {
+ return nil, &ErrUnsignedMetadata{Msg: "problem signing metadata"}
+ }
+ // get the signer's PublicKey
+ publ, err := signer.PublicKey()
+ if err != nil {
+ return nil, err
+ }
+ // convert to TUF Key type to get keyID
+ key, err := KeyFromPublicKey(publ)
+ if err != nil {
+ return nil, err
+ }
+ // build signature
+ sig := &Signature{
+ KeyID: key.ID(),
+ Signature: sb,
+ }
+ // update the Signatures part
+ meta.Signatures = append(meta.Signatures, *sig)
+ // return the new signature
+ log.Info("Signed metadata with key", "ID", key.ID())
+ return sig, nil
+}
+
+// VerifyDelegate verifies that delegatedMetadata is signed with the required
+// threshold of keys for the delegated role delegatedRole
+func (meta *Metadata[T]) VerifyDelegate(delegatedRole string, delegatedMetadata any) error {
+ i := any(meta)
+ signingKeys := map[string]bool{}
+ var keys map[string]*Key
+ var roleKeyIDs []string
+ var roleThreshold int
+
+ log.Info("Verifying", "role", delegatedRole)
+
+ // collect keys, keyIDs and threshold based on delegator type
+ switch i := i.(type) {
+ // Root delegator
+ case *Metadata[RootType]:
+ keys = i.Signed.Keys
+ if role, ok := (*i).Signed.Roles[delegatedRole]; ok {
+ roleKeyIDs = role.KeyIDs
+ roleThreshold = role.Threshold
+ } else {
+ // the delegated role was not found, no need to proceed
+ return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)}
+ }
+ // Targets delegator
+ case *Metadata[TargetsType]:
+ if i.Signed.Delegations == nil {
+ return &ErrValue{Msg: "no delegations found"}
+ }
+ keys = i.Signed.Delegations.Keys
+ if i.Signed.Delegations.Roles != nil {
+ found := false
+ for _, v := range i.Signed.Delegations.Roles {
+ if v.Name == delegatedRole {
+ found = true
+ roleKeyIDs = v.KeyIDs
+ roleThreshold = v.Threshold
+ break
+ }
+ }
+ // the delegated role was not found, no need to proceed
+ if !found {
+ return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)}
+ }
+ } else if i.Signed.Delegations.SuccinctRoles != nil {
+ roleKeyIDs = i.Signed.Delegations.SuccinctRoles.KeyIDs
+ roleThreshold = i.Signed.Delegations.SuccinctRoles.Threshold
+ }
+ default:
+ return &ErrType{Msg: "call is valid only on delegator metadata (should be either root or targets)"}
+ }
+ // if there are no keyIDs for that role it means there's no delegation found
+ if len(roleKeyIDs) == 0 {
+ return &ErrValue{Msg: fmt.Sprintf("no delegation found for %s", delegatedRole)}
+ }
+ // loop through each role keyID
+ for _, keyID := range roleKeyIDs {
+ key, ok := keys[keyID]
+ if !ok {
+ return &ErrValue{Msg: fmt.Sprintf("key with ID %s not found in %s keyids", keyID, delegatedRole)}
+ }
+ sign := Signature{}
+ var payload []byte
+ // convert to a PublicKey type
+ publicKey, err := key.ToPublicKey()
+ if err != nil {
+ return err
+ }
+ // use corresponding hash function for key type
+ hash := crypto.Hash(0)
+ if key.Type != KeyTypeEd25519 {
+ switch key.Scheme {
+ case KeySchemeECDSA_SHA2_P256:
+ hash = crypto.SHA256
+ case KeySchemeECDSA_SHA2_P384:
+ hash = crypto.SHA384
+ default:
+ hash = crypto.SHA256
+ }
+ }
+ // load a verifier based on that key
+ // handle RSA PSS scheme separately as the LoadVerifier function doesn't identify it correctly
+ // Note we should support RSA PSS, not RSA PKCS1v15 (which is what LoadVerifier would return)
+ // Reference: https://theupdateframework.github.io/specification/latest/#file-formats-keys
+ var verifier signature.Verifier
+ if key.Type == KeyTypeRSASSA_PSS_SHA256 {
+ // Load a verifier for rsa
+ publicKeyRSAPSS, ok := publicKey.(*rsa.PublicKey)
+ if !ok {
+ return &ErrType{Msg: "failed to convert public key to RSA PSS key"}
+ }
+ verifier, err = signature.LoadRSAPSSVerifier(publicKeyRSAPSS, hash, &rsa.PSSOptions{Hash: crypto.SHA256})
+ } else {
+ // Load a verifier for ed25519 and ecdsa
+ verifier, err = signature.LoadVerifier(publicKey, hash)
+ }
+ if err != nil {
+ return err
+ }
+ // collect the signature for that key and build the payload we'll verify
+ // based on the Signed part of the delegated metadata
+ switch d := delegatedMetadata.(type) {
+ case *Metadata[RootType]:
+ for _, signature := range d.Signatures {
+ if signature.KeyID == keyID {
+ sign = signature
+ }
+ }
+ payload, err = cjson.EncodeCanonical(d.Signed)
+ if err != nil {
+ return err
+ }
+ case *Metadata[SnapshotType]:
+ for _, signature := range d.Signatures {
+ if signature.KeyID == keyID {
+ sign = signature
+ }
+ }
+ payload, err = cjson.EncodeCanonical(d.Signed)
+ if err != nil {
+ return err
+ }
+ case *Metadata[TimestampType]:
+ for _, signature := range d.Signatures {
+ if signature.KeyID == keyID {
+ sign = signature
+ }
+ }
+ payload, err = cjson.EncodeCanonical(d.Signed)
+ if err != nil {
+ return err
+ }
+ case *Metadata[TargetsType]:
+ for _, signature := range d.Signatures {
+ if signature.KeyID == keyID {
+ sign = signature
+ }
+ }
+ payload, err = cjson.EncodeCanonical(d.Signed)
+ if err != nil {
+ return err
+ }
+ default:
+ return &ErrType{Msg: "unknown delegated metadata type"}
+ }
+ // verify if the signature for that payload corresponds to the given key
+ if err := verifier.VerifySignature(bytes.NewReader(sign.Signature), bytes.NewReader(payload)); err != nil {
+ // failed to verify the metadata with that key ID
+ log.Info("Failed to verify %s with key ID %s", delegatedRole, keyID)
+ } else {
+ // save the verified keyID only if verification passed
+ signingKeys[keyID] = true
+ log.Info("Verified with key", "role", delegatedRole, "ID", keyID)
+ }
+ }
+ // check if the amount of valid signatures is enough
+ if len(signingKeys) < roleThreshold {
+ log.Info("Verifying failed, not enough signatures", "role", delegatedRole, "got", len(signingKeys), "want", roleThreshold)
+ return &ErrUnsignedMetadata{Msg: fmt.Sprintf("Verifying %s failed, not enough signatures, got %d, want %d", delegatedRole, len(signingKeys), roleThreshold)}
+ }
+ log.Info("Verified successfully", "role", delegatedRole)
+ return nil
+}
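+
+// Illustrative usage (a minimal sketch, not part of the vendored upstream
+// source): a trusted root acts as the delegator when verifying a freshly
+// fetched timestamp against the threshold recorded in its "timestamp" role:
+//
+//	var root *Metadata[RootType]    // already loaded and trusted
+//	var ts *Metadata[TimestampType] // freshly fetched, not yet trusted
+//	if err := root.VerifyDelegate(TIMESTAMP, ts); err != nil {
+//		// not enough valid signatures from the timestamp role's keys
+//	}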
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *RootType) IsExpired(referenceTime time.Time) bool {
+ return referenceTime.After(signed.Expires)
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *SnapshotType) IsExpired(referenceTime time.Time) bool {
+ return referenceTime.After(signed.Expires)
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *TimestampType) IsExpired(referenceTime time.Time) bool {
+ return referenceTime.After(signed.Expires)
+}
+
+// IsExpired returns true if metadata is expired.
+// It checks if referenceTime is after Signed.Expires
+func (signed *TargetsType) IsExpired(referenceTime time.Time) bool {
+ return referenceTime.After(signed.Expires)
+}
+
+// VerifyLengthHashes checks whether the MetaFiles data matches its corresponding
+// length and hashes
+func (f *MetaFiles) VerifyLengthHashes(data []byte) error {
+ // hashes and length are optional for MetaFiles
+ if len(f.Hashes) > 0 {
+ err := verifyHashes(data, f.Hashes)
+ if err != nil {
+ return err
+ }
+ }
+ if f.Length != 0 {
+ err := verifyLength(data, f.Length)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// VerifyLengthHashes checks whether the TargetFiles data matches its corresponding
+// length and hashes
+func (f *TargetFiles) VerifyLengthHashes(data []byte) error {
+ err := verifyHashes(data, f.Hashes)
+ if err != nil {
+ return err
+ }
+ err = verifyLength(data, f.Length)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Equal checks whether the source target file matches another
+func (source *TargetFiles) Equal(expected TargetFiles) bool {
+ return source.Length == expected.Length && source.Hashes.Equal(expected.Hashes)
+}
+
+// FromFile generates TargetFiles from a file
+func (t *TargetFiles) FromFile(localPath string, hashes ...string) (*TargetFiles, error) {
+ log.Info("Generating target file from file", "path", localPath)
+ // read file
+ data, err := os.ReadFile(localPath)
+ if err != nil {
+ return nil, err
+ }
+ return t.FromBytes(localPath, data, hashes...)
+}
+
+// FromBytes generates TargetFiles from bytes
+func (t *TargetFiles) FromBytes(localPath string, data []byte, hashes ...string) (*TargetFiles, error) {
+ log.Info("Generating target file from bytes", "path", localPath)
+ var hasher hash.Hash
+ targetFile := &TargetFiles{
+ Hashes: map[string]HexBytes{},
+ }
+ // use default hash algorithm if not set
+ if len(hashes) == 0 {
+ hashes = []string{"sha256"}
+ }
+ // calculate length (avoid shadowing the builtin len)
+ length, err := io.Copy(io.Discard, bytes.NewReader(data))
+ if err != nil {
+ return nil, err
+ }
+ targetFile.Length = length
+ for _, v := range hashes {
+ switch v {
+ case "sha256":
+ hasher = sha256.New()
+ case "sha512":
+ hasher = sha512.New()
+ default:
+ return nil, &ErrValue{Msg: fmt.Sprintf("failed generating TargetFile - unsupported hashing algorithm - %s", v)}
+ }
+ _, err := hasher.Write(data)
+ if err != nil {
+ return nil, err
+ }
+ targetFile.Hashes[v] = hasher.Sum(nil)
+ }
+ targetFile.Path = localPath
+ return targetFile, nil
+}
+
+// ClearSignatures clears Signatures
+func (meta *Metadata[T]) ClearSignatures() {
+ log.Info("Cleared signatures")
+ meta.Signatures = []Signature{}
+}
+
+// IsDelegatedPath determines whether the given "targetFilepath" is in one of
+// the paths that "DelegatedRole" is trusted to provide
+func (role *DelegatedRole) IsDelegatedPath(targetFilepath string) (bool, error) {
+ if len(role.Paths) > 0 {
+ // standard delegations
+ for _, pathPattern := range role.Paths {
+ // A delegated role path may be an explicit path or glob
+ // pattern (Unix shell-style wildcards).
+ if isTargetInPathPattern(targetFilepath, pathPattern) {
+ return true, nil
+ }
+ }
+ } else if len(role.PathHashPrefixes) > 0 {
+ // hash bin delegations - calculate the hash of the filepath to determine in which bin to find the target.
+ targetFilepathHash := sha256.Sum256([]byte(targetFilepath))
+ for _, pathHashPrefix := range role.PathHashPrefixes {
+ if strings.HasPrefix(base64.URLEncoding.EncodeToString(targetFilepathHash[:]), pathHashPrefix) {
+ return true, nil
+ }
+ }
+ }
+ return false, nil
+}
+
+// isTargetInPathPattern determines whether "targetpath" matches the "pathpattern".
+func isTargetInPathPattern(targetpath string, pathpattern string) bool {
+ // We need to make sure that targetpath and pathpattern are pointing to
+ // the same directory as fnmatch doesn't treat "/" as a special symbol.
+ targetParts := strings.Split(targetpath, "/")
+ patternParts := strings.Split(pathpattern, "/")
+ if len(targetParts) != len(patternParts) {
+ return false
+ }
+
+ // Every part in the pathpattern could include a glob pattern, that's why
+ // each of the target and pathpattern parts should match.
+ for i := 0; i < len(targetParts); i++ {
+ if ok, _ := filepath.Match(patternParts[i], targetParts[i]); !ok {
+ return false
+ }
+ }
+
+ return true
+}
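+
+// Worked examples (not part of the vendored upstream source): each "/"
+// separated segment must match its pattern segment, so wildcards never
+// cross directory boundaries:
+//
+//	isTargetInPathPattern("files/a.txt", "files/*.txt") // true
+//	isTargetInPathPattern("files/sub/a.txt", "files/*") // false: extra segment
+//	isTargetInPathPattern("a/b/c.txt", "a/*/c.txt")     // true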
+
+// GetRolesForTarget returns the names and terminating status of all
+// delegated roles that are responsible for targetFilepath.
+// Note the result should be an ordered list, ref. https://github.com/theupdateframework/go-tuf/security/advisories/GHSA-4f8r-qqr9-fq8j
+func (role *Delegations) GetRolesForTarget(targetFilepath string) []RoleResult {
+ var res []RoleResult
+ // Standard delegations
+ if role.Roles != nil {
+ for _, r := range role.Roles {
+ ok, err := r.IsDelegatedPath(targetFilepath)
+ if err == nil && ok {
+ res = append(res, RoleResult{Name: r.Name, Terminating: r.Terminating})
+ }
+ }
+ } else if role.SuccinctRoles != nil {
+ // SuccinctRoles delegations
+ res = role.SuccinctRoles.GetRolesForTarget(targetFilepath)
+ }
+ // We preserve the same order as the actual roles list
+ return res
+}
+
+// GetRolesForTarget calculates the name of the delegated role responsible for "targetFilepath".
+// The target at path "targetFilepath" is assigned to a bin by casting
+// the left-most "BitLength" bits of the file path hash digest to
+// int, using it as a bin index between 0 and "2**BitLength - 1".
+func (role *SuccinctRoles) GetRolesForTarget(targetFilepath string) []RoleResult {
+ // calculate the suffixLen value based on the total number of bins in
+ // hex. If bit_length = 10 then numberOfBins = 1024 or bin names will
+ // have a suffix between "000" and "3ff" in hex and suffixLen will be 3
+ // meaning the third bin will have a suffix of "003"
+ numberOfBins := math.Pow(2, float64(role.BitLength))
+ // suffixLen is calculated based on "numberOfBins - 1" as the name
+ // of the last bin contains the number "numberOfBins -1" as a suffix.
+ suffixLen := len(strconv.FormatInt(int64(numberOfBins-1), 16))
+
+ targetFilepathHash := sha256.Sum256([]byte(targetFilepath))
+ // we can't ever need more than 4 bytes (32 bits)
+ hashBytes := targetFilepathHash[:4]
+
+ // right shift hash bytes, so that we only have the leftmost
+ // bit_length bits that we care about
+ shiftValue := 32 - role.BitLength
+ binNumber := binary.BigEndian.Uint32(hashBytes) >> shiftValue
+ // add zero padding if necessary and cast to hex the suffix
+ suffix := fmt.Sprintf("%0*x", suffixLen, binNumber)
+ // we consider all succinct_roles as terminating.
+ // for more information, read TAP 15.
+ return []RoleResult{{Name: fmt.Sprintf("%s-%s", role.NamePrefix, suffix), Terminating: true}}
+}
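+
+// Worked example (not part of the vendored upstream source; the resulting
+// bin suffix is illustrative): with BitLength = 10 there are 2^10 = 1024
+// bins, the last bin is numbered 1023 = 0x3ff, and suffixLen is therefore 3:
+//
+//	role := SuccinctRoles{BitLength: 10, NamePrefix: "bins"}
+//	res := role.GetRolesForTarget("some/target/path")
+//	// res is e.g. []RoleResult{{Name: "bins-1a7", Terminating: true}}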
+
+// GetRoles returns the names of all different delegated roles
+func (role *SuccinctRoles) GetRoles() []string {
+ res := []string{}
+ suffixLen, numberOfBins := role.GetSuffixLen()
+
+ for binNumber := 0; binNumber < numberOfBins; binNumber++ {
+ suffix := fmt.Sprintf("%0*x", suffixLen, binNumber)
+ res = append(res, fmt.Sprintf("%s-%s", role.NamePrefix, suffix))
+ }
+ return res
+}
+
+func (role *SuccinctRoles) GetSuffixLen() (int, int) {
+ numberOfBins := int(math.Pow(2, float64(role.BitLength)))
+ return len(strconv.FormatInt(int64(numberOfBins-1), 16)), numberOfBins
+}
+
+// IsDelegatedRole returns whether the given roleName is in one of
+// the delegated roles that "SuccinctRoles" represents
+func (role *SuccinctRoles) IsDelegatedRole(roleName string) bool {
+ suffixLen, numberOfBins := role.GetSuffixLen()
+
+ expectedPrefix := fmt.Sprintf("%s-", role.NamePrefix)
+
+ // check if the roleName prefix is what we would expect
+ if !strings.HasPrefix(roleName, expectedPrefix) {
+ return false
+ }
+
+ // check if the roleName suffix length is what we would expect
+ suffix := roleName[len(expectedPrefix):]
+ if len(suffix) != suffixLen {
+ return false
+ }
+
+ // make sure suffix is hex value and get bin number
+ value, err := strconv.ParseInt(suffix, 16, 64)
+ if err != nil {
+ return false
+ }
+
+ // check if the bin we calculated is indeed within the range of what we support
+ return (value >= 0) && (value < int64(numberOfBins))
+}
+
+// AddKey adds a new signing key for the role "role"
+// key: Signing key to be added for "role".
+// role: Name of the role for which "key" is added.
+func (signed *RootType) AddKey(key *Key, role string) error {
+ // verify role is present
+ if _, ok := signed.Roles[role]; !ok {
+ return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)}
+ }
+ // add keyID to role
+ if !slices.Contains(signed.Roles[role].KeyIDs, key.ID()) {
+ signed.Roles[role].KeyIDs = append(signed.Roles[role].KeyIDs, key.ID())
+ }
+ // update Keys
+ signed.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+ return nil
+}
+
+// RevokeKey revokes the key from "role" and updates the Keys store.
+// keyID: Identifier of the key to be removed for "role".
+// role: Name of the role for which a signing key is removed.
+func (signed *RootType) RevokeKey(keyID, role string) error {
+ // verify role is present
+ if _, ok := signed.Roles[role]; !ok {
+ return &ErrValue{Msg: fmt.Sprintf("role %s doesn't exist", role)}
+ }
+ // verify keyID is present for given role
+ if !slices.Contains(signed.Roles[role].KeyIDs, keyID) {
+ return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)}
+ }
+ // remove keyID from role
+ filteredKeyIDs := []string{}
+ for _, k := range signed.Roles[role].KeyIDs {
+ if k != keyID {
+ filteredKeyIDs = append(filteredKeyIDs, k)
+ }
+ }
+ // overwrite the old keyID slice
+ signed.Roles[role].KeyIDs = filteredKeyIDs
+ // check if keyID is used by other roles too
+ for _, r := range signed.Roles {
+ if slices.Contains(r.KeyIDs, keyID) {
+ return nil
+ }
+ }
+ // delete the keyID from Keys if it's not used anywhere else
+ delete(signed.Keys, keyID)
+ return nil
+}
+
+// AddKey adds a new signing key for the delegated role "role"
+// key: Signing key to be added for "role".
+// role: Name of the role for which "key" is added.
+// If SuccinctRoles is used, the "role" argument can be ignored.
+func (signed *TargetsType) AddKey(key *Key, role string) error {
+ // check if Delegations are even present
+ if signed.Delegations == nil {
+ return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+ }
+ // standard delegated roles
+ if signed.Delegations.Roles != nil {
+ // loop through all delegated roles
+ isDelegatedRole := false
+ for i, d := range signed.Delegations.Roles {
+ // if role is found
+ if d.Name == role {
+ isDelegatedRole = true
+ // add key if keyID is not already part of keyIDs for that role
+ if !slices.Contains(d.KeyIDs, key.ID()) {
+ signed.Delegations.Roles[i].KeyIDs = append(signed.Delegations.Roles[i].KeyIDs, key.ID())
+ signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+ return nil
+ }
+ log.Info("Delegated role already has keyID", "role", role, "ID", key.ID())
+ }
+ }
+ if !isDelegatedRole {
+ return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+ }
+ } else if signed.Delegations.SuccinctRoles != nil {
+ // add key if keyID is not already part of keyIDs for the SuccinctRoles role
+ if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, key.ID()) {
+ signed.Delegations.SuccinctRoles.KeyIDs = append(signed.Delegations.SuccinctRoles.KeyIDs, key.ID())
+ signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+ return nil
+ }
+ log.Info("SuccinctRoles role already has keyID", "ID", key.ID())
+
+ }
+ signed.Delegations.Keys[key.ID()] = key // TODO: should we check if we don't accidentally override an existing keyID with another key value?
+ return nil
+}
+
+// RevokeKey revokes the key from the delegated role "role" and updates the delegations key store.
+// keyID: Identifier of the key to be removed for "role".
+// role: Name of the role for which a signing key is removed.
+func (signed *TargetsType) RevokeKey(keyID string, role string) error {
+ // check if Delegations are even present
+ if signed.Delegations == nil {
+ return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+ }
+ // standard delegated roles
+ if signed.Delegations.Roles != nil {
+ // loop through all delegated roles
+ for i, d := range signed.Delegations.Roles {
+ // if role is found
+ if d.Name == role {
+ // check if keyID is present in keyIDs for that role
+ if !slices.Contains(d.KeyIDs, keyID) {
+ return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by %s", keyID, role)}
+ }
+ // remove keyID from role
+ filteredKeyIDs := []string{}
+ for _, k := range signed.Delegations.Roles[i].KeyIDs {
+ if k != keyID {
+ filteredKeyIDs = append(filteredKeyIDs, k)
+ }
+ }
+ // overwrite the old keyID slice for that role
+ signed.Delegations.Roles[i].KeyIDs = filteredKeyIDs
+ // check if keyID is used by other roles too
+ for _, r := range signed.Delegations.Roles {
+ if slices.Contains(r.KeyIDs, keyID) {
+ return nil
+ }
+ }
+ // delete the keyID from Keys if it's not used anywhere else
+ delete(signed.Delegations.Keys, keyID)
+ return nil
+ }
+ }
+ // we haven't found the delegated role
+ return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+ } else if signed.Delegations.SuccinctRoles != nil {
+ // check if keyID is used by SuccinctRoles role
+ if !slices.Contains(signed.Delegations.SuccinctRoles.KeyIDs, keyID) {
+ return &ErrValue{Msg: fmt.Sprintf("key with id %s is not used by SuccinctRoles", keyID)}
+ }
+ // remove keyID from the SuccinctRoles role
+ filteredKeyIDs := []string{}
+ for _, k := range signed.Delegations.SuccinctRoles.KeyIDs {
+ if k != keyID {
+ filteredKeyIDs = append(filteredKeyIDs, k)
+ }
+ }
+ // overwrite the old keyID slice for SuccinctRoles role
+ signed.Delegations.SuccinctRoles.KeyIDs = filteredKeyIDs
+
+ // delete the keyID from Keys since it can not be used anywhere else
+ delete(signed.Delegations.Keys, keyID)
+ return nil
+ }
+ return &ErrValue{Msg: fmt.Sprintf("delegated role %s doesn't exist", role)}
+}
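+// A hedged usage sketch for the two methods above (the variable and role
+// names are illustrative, not part of this package):
+//
+//	var tgt TargetsType // assumed to carry non-nil Delegations with a "bins" role
+//	_ = tgt.AddKey(someKey, "bins")
+//	_ = tgt.RevokeKey(someKey.ID(), "bins")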
+
+// Equal checks whether one hash set equals another: it returns true only if
+// at least one algorithm is present in both sets and every shared algorithm
+// carries an identical digest
+func (source Hashes) Equal(expected Hashes) bool {
+ hashChecked := false
+ for typ, hash := range expected {
+ if h, ok := source[typ]; ok {
+ // hash type match found
+ hashChecked = true
+ if !hmac.Equal(h, hash) {
+ // hash values don't match
+ return false
+ }
+ }
+ }
+ return hashChecked
+}
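+// For illustration (hedged; not upstream documentation): two Hashes values
+// compare equal only when they share at least one algorithm and every shared
+// algorithm carries the same digest:
+//
+//	a := Hashes{"sha256": HexBytes{0x01}}
+//	b := Hashes{"sha256": HexBytes{0x01}, "sha512": HexBytes{0x02}}
+//	_ = a.Equal(b) // true: the common "sha256" entry matches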
+
+// verifyLength verifies that the passed data has the expected length
+func verifyLength(data []byte, length int64) error {
+ n, err := io.Copy(io.Discard, bytes.NewReader(data))
+ if err != nil {
+ return err
+ }
+ if length != n {
+ return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("length verification failed - expected %d, got %d", length, n)}
+ }
+ return nil
+}
+
+// verifyHashes verifies that the hash of the passed data matches the expected hashes
+func verifyHashes(data []byte, hashes Hashes) error {
+ var hasher hash.Hash
+ for k, v := range hashes {
+ switch k {
+ case "sha256":
+ hasher = sha256.New()
+ case "sha512":
+ hasher = sha512.New()
+ default:
+ return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - unknown hashing algorithm - %s", k)}
+ }
+ hasher.Write(data)
+ if hex.EncodeToString(v) != hex.EncodeToString(hasher.Sum(nil)) {
+ return &ErrLengthOrHashMismatch{Msg: fmt.Sprintf("hash verification failed - mismatch for algorithm %s", k)}
+ }
+ }
+ return nil
+}
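+// A minimal sketch of building a Hashes value that verifyHashes accepts
+// (data is an assumed []byte; sha256 is already imported by this file):
+//
+//	sum := sha256.Sum256(data)
+//	hashes := Hashes{"sha256": sum[:]}
+//	_ = verifyHashes(data, hashes) // nil on success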
+
+// fromBytes returns a *Metadata[T] object from bytes and verifies
+// that the data corresponds to the caller's struct type
+func fromBytes[T Roles](data []byte) (*Metadata[T], error) {
+ meta := &Metadata[T]{}
+ // verify that the type we used to create the object is the same as the type of the metadata file
+ if err := checkType[T](data); err != nil {
+ return nil, err
+ }
+ // if all is okay, unmarshal meta to the desired Metadata[T] type
+ if err := json.Unmarshal(data, meta); err != nil {
+ return nil, err
+ }
+ // Make sure signature key IDs are unique
+ if err := checkUniqueSignatures(*meta); err != nil {
+ return nil, err
+ }
+ return meta, nil
+}
+
+// checkUniqueSignatures verifies that the signature key IDs are unique within the metadata
+func checkUniqueSignatures[T Roles](meta Metadata[T]) error {
+ signatures := []string{}
+ for _, sig := range meta.Signatures {
+ if slices.Contains(signatures, sig.KeyID) {
+ return &ErrValue{Msg: fmt.Sprintf("multiple signatures found for key ID %s", sig.KeyID)}
+ }
+ signatures = append(signatures, sig.KeyID)
+ }
+ return nil
+}
+
+// checkType verifies that the generic type used to create the object matches the type of the metadata file in bytes
+func checkType[T Roles](data []byte) error {
+ var m map[string]any
+ i := any(new(T))
+ if err := json.Unmarshal(data, &m); err != nil {
+ return err
+ }
+ // guard the type assertions so malformed metadata cannot cause a panic
+ signed, ok := m["signed"].(map[string]any)
+ if !ok {
+ return &ErrValue{Msg: "metadata is missing a \"signed\" object"}
+ }
+ signedType, ok := signed["_type"].(string)
+ if !ok {
+ return &ErrValue{Msg: "metadata \"signed\" object has no string \"_type\" field"}
+ }
+ switch i.(type) {
+ case *RootType:
+ if ROOT != signedType {
+ return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", ROOT, signedType)}
+ }
+ case *SnapshotType:
+ if SNAPSHOT != signedType {
+ return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", SNAPSHOT, signedType)}
+ }
+ case *TimestampType:
+ if TIMESTAMP != signedType {
+ return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TIMESTAMP, signedType)}
+ }
+ case *TargetsType:
+ if TARGETS != signedType {
+ return &ErrValue{Msg: fmt.Sprintf("expected metadata type %s, got - %s", TARGETS, signedType)}
+ }
+ default:
+ return &ErrValue{Msg: fmt.Sprintf("unrecognized metadata type - %s", signedType)}
+ }
+ // all okay
+ return nil
+}
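+// Hedged example of the type check in action: decoding root metadata bytes
+// with the wrong generic parameter fails before the payload is used
+// (rootJSON is an assumed variable holding root metadata bytes):
+//
+//	_, err := fromBytes[TimestampType](rootJSON)
+//	// err reports: expected metadata type timestamp, got - root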
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go
new file mode 100644
index 00000000000..0726f31f881
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata/trustedmetadata.go
@@ -0,0 +1,357 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package trustedmetadata
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/theupdateframework/go-tuf/v2/metadata"
+)
+
+// TrustedMetadata struct for storing trusted metadata
+type TrustedMetadata struct {
+ Root *metadata.Metadata[metadata.RootType]
+ Snapshot *metadata.Metadata[metadata.SnapshotType]
+ Timestamp *metadata.Metadata[metadata.TimestampType]
+ Targets map[string]*metadata.Metadata[metadata.TargetsType]
+ RefTime time.Time
+}
+
+// New creates a new TrustedMetadata instance which ensures that the
+// collection of metadata in it is valid and trusted through the whole
+// client update workflow. It provides convenient ways to update the
+// metadata, with the caller deciding what is updated.
+func New(rootData []byte) (*TrustedMetadata, error) {
+ res := &TrustedMetadata{
+ Targets: map[string]*metadata.Metadata[metadata.TargetsType]{},
+ RefTime: time.Now().UTC(),
+ }
+ // load and validate the local root metadata
+ // valid initial trusted root metadata is required
+ err := res.loadTrustedRoot(rootData)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
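+// A hedged construction sketch (rootBytes is an assumed variable holding the
+// initial trusted root metadata, e.g. shipped with the application):
+//
+//	trusted, err := trustedmetadata.New(rootBytes)
+//	if err != nil {
+//		return err // initial root failed self-verification
+//	}
+//	_ = trusted.Root.Signed.Version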
+
+// UpdateRoot verifies and loads "rootData" as new root metadata.
+// Note that an expired intermediate root is considered valid: expiry is
+// only checked for the final root in UpdateTimestamp()
+func (trusted *TrustedMetadata) UpdateRoot(rootData []byte) (*metadata.Metadata[metadata.RootType], error) {
+ log := metadata.GetLogger()
+
+ if trusted.Timestamp != nil {
+ return nil, &metadata.ErrRuntime{Msg: "cannot update root after timestamp"}
+ }
+ log.Info("Updating root")
+ // generate root metadata
+ newRoot, err := metadata.Root().FromBytes(rootData)
+ if err != nil {
+ return nil, err
+ }
+ // check metadata type matches root
+ if newRoot.Signed.Type != metadata.ROOT {
+ return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)}
+ }
+ // verify that new root is signed by trusted root
+ err = trusted.Root.VerifyDelegate(metadata.ROOT, newRoot)
+ if err != nil {
+ return nil, err
+ }
+ // verify version
+ if newRoot.Signed.Version != trusted.Root.Signed.Version+1 {
+ return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("bad version number, expected %d, got %d", trusted.Root.Signed.Version+1, newRoot.Signed.Version)}
+ }
+ // verify that new root is signed by itself
+ err = newRoot.VerifyDelegate(metadata.ROOT, newRoot)
+ if err != nil {
+ return nil, err
+ }
+ // save root if verified
+ trusted.Root = newRoot
+ log.Info("Updated root", "version", trusted.Root.Signed.Version)
+ return trusted.Root, nil
+}
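+// Root rotations must be applied one version at a time; a hedged sketch,
+// where fetchRoot is an assumed caller-supplied download helper:
+//
+//	for v := trusted.Root.Signed.Version + 1; ; v++ {
+//		data, found := fetchRoot(v)
+//		if !found {
+//			break // no newer root published
+//		}
+//		if _, err := trusted.UpdateRoot(data); err != nil {
+//			return err
+//		}
+//	}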
+
+// UpdateTimestamp verifies and loads "timestampData" as new timestamp metadata.
+// Note that an intermediate timestamp is allowed to be expired. TrustedMetadata
+// will error in this case, but the intermediate timestamp will be loaded.
+// This way a newer timestamp can still be loaded (and the intermediate
+// timestamp will be used for rollback protection). An expired timestamp will
+// prevent loading snapshot metadata.
+func (trusted *TrustedMetadata) UpdateTimestamp(timestampData []byte) (*metadata.Metadata[metadata.TimestampType], error) {
+ log := metadata.GetLogger()
+
+ if trusted.Snapshot != nil {
+ return nil, &metadata.ErrRuntime{Msg: "cannot update timestamp after snapshot"}
+ }
+ // client workflow 5.3.10: Make sure final root is not expired.
+ if trusted.Root.Signed.IsExpired(trusted.RefTime) {
+ // no need to check for 5.3.11 (fast forward attack recovery):
+ // timestamp/snapshot can not yet be loaded at this point
+ return nil, &metadata.ErrExpiredMetadata{Msg: "final root.json is expired"}
+ }
+ log.Info("Updating timestamp")
+ newTimestamp, err := metadata.Timestamp().FromBytes(timestampData)
+ if err != nil {
+ return nil, err
+ }
+ // check metadata type matches timestamp
+ if newTimestamp.Signed.Type != metadata.TIMESTAMP {
+ return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TIMESTAMP, newTimestamp.Signed.Type)}
+ }
+ // verify that new timestamp is signed by trusted root
+ err = trusted.Root.VerifyDelegate(metadata.TIMESTAMP, newTimestamp)
+ if err != nil {
+ return nil, err
+ }
+ // if an existing trusted timestamp is updated,
+ // check for a rollback attack
+ if trusted.Timestamp != nil {
+ // prevent rolling back timestamp version
+ if newTimestamp.Signed.Version < trusted.Timestamp.Signed.Version {
+ return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new timestamp version %d must be >= %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)}
+ }
+ // keep using old timestamp if versions are equal
+ if newTimestamp.Signed.Version == trusted.Timestamp.Signed.Version {
+ log.Info("New timestamp version equals the old one", "new", newTimestamp.Signed.Version, "old", trusted.Timestamp.Signed.Version)
+ return nil, &metadata.ErrEqualVersionNumber{Msg: fmt.Sprintf("new timestamp version %d equals the old one %d", newTimestamp.Signed.Version, trusted.Timestamp.Signed.Version)}
+ }
+ // prevent rolling back snapshot version
+ snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+ newSnapshotMeta := newTimestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+ if newSnapshotMeta.Version < snapshotMeta.Version {
+ return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("new snapshot version %d must be >= %d", newSnapshotMeta.Version, snapshotMeta.Version)}
+ }
+ }
+ // expiry not checked to allow old timestamp to be used for rollback
+ // protection of new timestamp: expiry is checked in UpdateSnapshot()
+ // save timestamp if verified
+ trusted.Timestamp = newTimestamp
+ log.Info("Updated timestamp", "version", trusted.Timestamp.Signed.Version)
+
+ // timestamp is loaded: error if it is not valid _final_ timestamp
+ err = trusted.checkFinalTimestamp()
+ if err != nil {
+ return nil, err
+ }
+ // all okay
+ return trusted.Timestamp, nil
+}
+
+// checkFinalTimestamp verifies that the trusted timestamp is not expired
+func (trusted *TrustedMetadata) checkFinalTimestamp() error {
+ if trusted.Timestamp.Signed.IsExpired(trusted.RefTime) {
+ return &metadata.ErrExpiredMetadata{Msg: "timestamp.json is expired"}
+ }
+ return nil
+}
+
+// UpdateSnapshot verifies and loads "snapshotData" as new snapshot metadata.
+// Note that an intermediate snapshot is allowed to be expired, and its version
+// is allowed to not match the timestamp meta version: TrustedMetadata
+// will error for expired metadata or a bad version, but the intermediate
+// snapshot will still be loaded. This way a newer snapshot can be loaded
+// (and the intermediate snapshot will be used for rollback protection).
+// An expired snapshot, or one that does not match the timestamp meta version,
+// will prevent loading targets.
+func (trusted *TrustedMetadata) UpdateSnapshot(snapshotData []byte, isTrusted bool) (*metadata.Metadata[metadata.SnapshotType], error) {
+ log := metadata.GetLogger()
+
+ if trusted.Timestamp == nil {
+ return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot before timestamp"}
+ }
+ if trusted.Targets[metadata.TARGETS] != nil {
+ return nil, &metadata.ErrRuntime{Msg: "cannot update snapshot after targets"}
+ }
+ log.Info("Updating snapshot")
+
+ // snapshot cannot be loaded if final timestamp is expired
+ err := trusted.checkFinalTimestamp()
+ if err != nil {
+ return nil, err
+ }
+ snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+ // verify non-trusted data against the hashes in timestamp, if any.
+ // trusted snapshot data has already been verified once.
+ if !isTrusted {
+ err = snapshotMeta.VerifyLengthHashes(snapshotData)
+ if err != nil {
+ return nil, err
+ }
+ }
+ newSnapshot, err := metadata.Snapshot().FromBytes(snapshotData)
+ if err != nil {
+ return nil, err
+ }
+ // check metadata type matches snapshot
+ if newSnapshot.Signed.Type != metadata.SNAPSHOT {
+ return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.SNAPSHOT, newSnapshot.Signed.Type)}
+ }
+ // verify that new snapshot is signed by trusted root
+ err = trusted.Root.VerifyDelegate(metadata.SNAPSHOT, newSnapshot)
+ if err != nil {
+ return nil, err
+ }
+
+ // version not checked against meta version to allow old snapshot to be
+ // used in rollback protection: it is checked when targets is updated
+
+ // if an existing trusted snapshot is updated, check for rollback attack
+ if trusted.Snapshot != nil {
+ for name, info := range trusted.Snapshot.Signed.Meta {
+ newFileInfo, ok := newSnapshot.Signed.Meta[name]
+ // prevent removal of any metadata in meta
+ if !ok {
+ return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("new snapshot is missing info for %s", name)}
+ }
+ // prevent rollback of any metadata versions
+ if newFileInfo.Version < info.Version {
+ return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", name, info.Version, newFileInfo.Version)}
+ }
+ }
+ }
+
+ // expiry not checked to allow old snapshot to be used for rollback
+ // protection of new snapshot: it is checked when targets is updated
+ trusted.Snapshot = newSnapshot
+ log.Info("Updated snapshot", "version", trusted.Snapshot.Signed.Version)
+
+ // snapshot is loaded, but we error if it's not valid _final_ snapshot
+ err = trusted.checkFinalSnapshot()
+ if err != nil {
+ return nil, err
+ }
+ // all okay
+ return trusted.Snapshot, nil
+}
+
+// checkFinalSnapshot verifies that the trusted snapshot is not expired and that its version matches the timestamp meta version
+func (trusted *TrustedMetadata) checkFinalSnapshot() error {
+ if trusted.Snapshot.Signed.IsExpired(trusted.RefTime) {
+ return &metadata.ErrExpiredMetadata{Msg: "snapshot.json is expired"}
+ }
+ snapshotMeta := trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+ if trusted.Snapshot.Signed.Version != snapshotMeta.Version {
+ return &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %d, got %d", snapshotMeta.Version, trusted.Snapshot.Signed.Version)}
+ }
+ return nil
+}
+
+// UpdateTargets verifies and loads "targetsData" as new top-level targets metadata.
+func (trusted *TrustedMetadata) UpdateTargets(targetsData []byte) (*metadata.Metadata[metadata.TargetsType], error) {
+ return trusted.UpdateDelegatedTargets(targetsData, metadata.TARGETS, metadata.ROOT)
+}
+
+// UpdateDelegatedTargets verifies and loads "targetsData" as new metadata for target role "roleName"
+func (trusted *TrustedMetadata) UpdateDelegatedTargets(targetsData []byte, roleName, delegatorName string) (*metadata.Metadata[metadata.TargetsType], error) {
+ log := metadata.GetLogger()
+
+ var ok bool
+ if trusted.Snapshot == nil {
+ return nil, &metadata.ErrRuntime{Msg: "cannot load targets before snapshot"}
+ }
+ // targets cannot be loaded if final snapshot is expired or its version
+ // does not match meta version in timestamp
+ err := trusted.checkFinalSnapshot()
+ if err != nil {
+ return nil, err
+ }
+ // check if delegator metadata is present
+ if delegatorName == metadata.ROOT {
+ ok = trusted.Root != nil
+ } else {
+ _, ok = trusted.Targets[delegatorName]
+ }
+ if !ok {
+ return nil, &metadata.ErrRuntime{Msg: "cannot load targets before delegator"}
+ }
+ log.Info("Updating delegated role", "role", roleName, "delegator", delegatorName)
+ // Verify against the hashes in snapshot, if any
+ meta, ok := trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)]
+ if !ok {
+ return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("snapshot does not contain information for %s", roleName)}
+ }
+ err = meta.VerifyLengthHashes(targetsData)
+ if err != nil {
+ return nil, err
+ }
+ newDelegate, err := metadata.Targets().FromBytes(targetsData)
+ if err != nil {
+ return nil, err
+ }
+ // check metadata type matches targets
+ if newDelegate.Signed.Type != metadata.TARGETS {
+ return nil, &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.TARGETS, newDelegate.Signed.Type)}
+ }
+ // get delegator metadata and verify the new delegatee
+ if delegatorName == metadata.ROOT {
+ err = trusted.Root.VerifyDelegate(roleName, newDelegate)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ err = trusted.Targets[delegatorName].VerifyDelegate(roleName, newDelegate)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // check versions
+ if newDelegate.Signed.Version != meta.Version {
+ return nil, &metadata.ErrBadVersionNumber{Msg: fmt.Sprintf("expected %s version %d, got %d", roleName, meta.Version, newDelegate.Signed.Version)}
+ }
+ // check expiration
+ if newDelegate.Signed.IsExpired(trusted.RefTime) {
+ return nil, &metadata.ErrExpiredMetadata{Msg: fmt.Sprintf("new %s is expired", roleName)}
+ }
+ trusted.Targets[roleName] = newDelegate
+ log.Info("Updated role", "role", roleName, "version", trusted.Targets[roleName].Signed.Version)
+ return trusted.Targets[roleName], nil
+}
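+// The update order enforced by the methods above is fixed; a hedged sketch
+// (the *Bytes variables are assumed inputs fetched by the caller):
+//
+//	_, _ = trusted.UpdateRoot(rootBytes)           // optional; repeatable until timestamp is set
+//	_, _ = trusted.UpdateTimestamp(timestampBytes) // locks out further root updates
+//	_, _ = trusted.UpdateSnapshot(snapshotBytes, false)
+//	_, _ = trusted.UpdateTargets(targetsBytes)     // top-level targets last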
+
+// loadTrustedRoot verifies and loads "rootData" as trusted root metadata.
+// Note that an expired initial root is considered valid: expiry is
+// only checked for the final root in UpdateTimestamp().
+func (trusted *TrustedMetadata) loadTrustedRoot(rootData []byte) error {
+ log := metadata.GetLogger()
+
+ // generate root metadata
+ newRoot, err := metadata.Root().FromBytes(rootData)
+ if err != nil {
+ return err
+ }
+ // check metadata type matches root
+ if newRoot.Signed.Type != metadata.ROOT {
+ return &metadata.ErrRepository{Msg: fmt.Sprintf("expected %s, got %s", metadata.ROOT, newRoot.Signed.Type)}
+ }
+ // verify root by itself
+ err = newRoot.VerifyDelegate(metadata.ROOT, newRoot)
+ if err != nil {
+ return err
+ }
+ // save root if verified
+ trusted.Root = newRoot
+ log.Info("Loaded trusted root", "version", trusted.Root.Signed.Version)
+ return nil
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go
new file mode 100644
index 00000000000..0bd86ee8f23
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/types.go
@@ -0,0 +1,179 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package metadata
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// Generic type constraint
+type Roles interface {
+ RootType | SnapshotType | TimestampType | TargetsType
+}
+
+// Define version of the TUF specification
+const (
+ SPECIFICATION_VERSION = "1.0.31"
+)
+
+// Define top level role names
+const (
+ ROOT = "root"
+ SNAPSHOT = "snapshot"
+ TARGETS = "targets"
+ TIMESTAMP = "timestamp"
+)
+
+var TOP_LEVEL_ROLE_NAMES = [...]string{ROOT, TIMESTAMP, SNAPSHOT, TARGETS}
+
+// Metadata[T Roles] represents TUF metadata.
+// It provides methods to read from and write to files and byte slices,
+// and to create, verify, and clear metadata signatures.
+type Metadata[T Roles] struct {
+ Signed T `json:"signed"`
+ Signatures []Signature `json:"signatures"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
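+// The generic parameter selects which Signed payload the struct carries; for
+// instance (illustrative only):
+//
+//	var root Metadata[RootType]       // root.Signed is a RootType
+//	var targets Metadata[TargetsType] // targets.Signed is a TargetsType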
+
+// Signature represents the Signature part of a TUF metadata
+type Signature struct {
+ KeyID string `json:"keyid"`
+ Signature HexBytes `json:"sig"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// RootType represents the Signed portion of a root metadata
+type RootType struct {
+ Type string `json:"_type"`
+ SpecVersion string `json:"spec_version"`
+ ConsistentSnapshot bool `json:"consistent_snapshot"`
+ Version int64 `json:"version"`
+ Expires time.Time `json:"expires"`
+ Keys map[string]*Key `json:"keys"`
+ Roles map[string]*Role `json:"roles"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// SnapshotType represents the Signed portion of a snapshot metadata
+type SnapshotType struct {
+ Type string `json:"_type"`
+ SpecVersion string `json:"spec_version"`
+ Version int64 `json:"version"`
+ Expires time.Time `json:"expires"`
+ Meta map[string]*MetaFiles `json:"meta"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// TargetsType represents the Signed portion of a targets metadata
+type TargetsType struct {
+ Type string `json:"_type"`
+ SpecVersion string `json:"spec_version"`
+ Version int64 `json:"version"`
+ Expires time.Time `json:"expires"`
+ Targets map[string]*TargetFiles `json:"targets"`
+ Delegations *Delegations `json:"delegations,omitempty"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// TimestampType represents the Signed portion of a timestamp metadata
+type TimestampType struct {
+ Type string `json:"_type"`
+ SpecVersion string `json:"spec_version"`
+ Version int64 `json:"version"`
+ Expires time.Time `json:"expires"`
+ Meta map[string]*MetaFiles `json:"meta"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// Key represents a key in TUF
+type Key struct {
+ Type string `json:"keytype"`
+ Scheme string `json:"scheme"`
+ Value KeyVal `json:"keyval"`
+ id string `json:"-"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+type KeyVal struct {
+ PublicKey string `json:"public"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// Role represents one of the top-level roles in TUF
+type Role struct {
+ KeyIDs []string `json:"keyids"`
+ Threshold int `json:"threshold"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+type HexBytes []byte
+
+type Hashes map[string]HexBytes
+
+// MetaFiles represents the value portion of METAFILES in TUF (used in Snapshot and Timestamp metadata). Used to store information about a particular meta file.
+type MetaFiles struct {
+ Length int64 `json:"length,omitempty"`
+ Hashes Hashes `json:"hashes,omitempty"`
+ Version int64 `json:"version"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// TargetFiles represents the value portion of TARGETS in TUF (used in Targets metadata). Used to store information about a particular target file.
+type TargetFiles struct {
+ Length int64 `json:"length"`
+ Hashes Hashes `json:"hashes"`
+ Custom *json.RawMessage `json:"custom,omitempty"`
+ Path string `json:"-"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// Delegations is an optional object which represents delegation roles and their corresponding keys
+type Delegations struct {
+ Keys map[string]*Key `json:"keys"`
+ Roles []DelegatedRole `json:"roles,omitempty"`
+ SuccinctRoles *SuccinctRoles `json:"succinct_roles,omitempty"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// DelegatedRole represents a delegated role in TUF
+type DelegatedRole struct {
+ Name string `json:"name"`
+ KeyIDs []string `json:"keyids"`
+ Threshold int `json:"threshold"`
+ Terminating bool `json:"terminating"`
+ PathHashPrefixes []string `json:"path_hash_prefixes,omitempty"`
+ Paths []string `json:"paths,omitempty"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
+
+// SuccinctRoles represents a delegation graph that covers all targets,
+// distributing them uniformly over the delegated roles (i.e. bins) in the graph.
+type SuccinctRoles struct {
+ KeyIDs []string `json:"keyids"`
+ Threshold int `json:"threshold"`
+ BitLength int `json:"bit_length"`
+ NamePrefix string `json:"name_prefix"`
+ UnrecognizedFields map[string]any `json:"-"`
+}
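+// As an illustration (hedged): BitLength 8 with NamePrefix "bin" describes
+// 2^8 = 256 bin roles, and a target path is assigned to a bin by the leading
+// 8 bits of the hash of its path, so every target is covered by exactly one bin.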
+
+// RoleResult represents the name and terminating status of a delegated role that is responsible for targetFilepath
+type RoleResult struct {
+ Name string
+ Terminating bool
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go
new file mode 100644
index 00000000000..bd533b63c3d
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/v2/metadata/updater/updater.go
@@ -0,0 +1,704 @@
+// Copyright 2024 The Update Framework Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package updater
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "regexp"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/theupdateframework/go-tuf/v2/metadata"
+ "github.com/theupdateframework/go-tuf/v2/metadata/config"
+ "github.com/theupdateframework/go-tuf/v2/metadata/trustedmetadata"
+)
+
+// Client update workflow implementation
+//
+// The "Updater" provides an implementation of the TUF client workflow (ref. https://theupdateframework.github.io/specification/latest/#detailed-client-workflow).
+// "Updater" provides an API to query available targets and to download them in a
+// secure manner: All downloaded files are verified by signed metadata.
+// High-level description of "Updater" functionality:
+// - Initializing an "Updater" loads and validates the trusted local root
+// metadata: This root metadata is used as the source of trust for all other
+// metadata.
+// - Refresh() can optionally be called to update and load all top-level
+// metadata as described in the specification, using both locally cached
+// metadata and metadata downloaded from the remote repository. If refresh is
+// not done explicitly, it will happen automatically during the first target
+// info lookup.
+// - Updater can be used to download targets. For each target:
+// - GetTargetInfo() is first used to find information about a
+// specific target. This will load new targets metadata as needed (from
+// local cache or remote repository).
+// - FindCachedTarget() can optionally be used to check if a
+// target file is already locally cached.
+// - DownloadTarget() downloads a target file and ensures it is
+// verified correct by the metadata.
+type Updater struct {
+ trusted *trustedmetadata.TrustedMetadata
+ cfg *config.UpdaterConfig
+}
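+// A hedged end-to-end sketch of the workflow described above (cfg is an
+// assumed *config.UpdaterConfig with trusted root and remote URLs populated):
+//
+//	up, err := updater.New(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	if err := up.Refresh(); err != nil {
+//		return err
+//	}
+//	ti, err := up.GetTargetInfo("path/to/target")
+//	if err != nil {
+//		return err
+//	}
+//	path, data, err := up.FindCachedTarget(ti, "")
+//	if err == nil && data == nil {
+//		path, data, err = up.DownloadTarget(ti, "", "")
+//	}
+//	// path and data now refer to the verified target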
+
+type roleParentTuple struct {
+ Role string
+ Parent string
+}
+
+// New creates a new Updater instance and loads trusted root metadata
+func New(config *config.UpdaterConfig) (*Updater, error) {
+ // make sure the trusted root metadata and remote URL were provided
+ if len(config.LocalTrustedRoot) == 0 || len(config.RemoteMetadataURL) == 0 {
+ return nil, fmt.Errorf("no initial trusted root metadata or remote URL provided")
+ }
+ // create a new trusted metadata instance using the trusted root.json
+ trustedMetadataSet, err := trustedmetadata.New(config.LocalTrustedRoot)
+ if err != nil {
+ return nil, err
+ }
+ // create an updater instance
+ updater := &Updater{
+ cfg: config,
+ trusted: trustedMetadataSet, // save trusted metadata set
+ }
+ // ensure paths exist, doesn't do anything if caching is disabled
+ err = updater.cfg.EnsurePathsExist()
+ if err != nil {
+ return nil, err
+ }
+ // persist the initial root metadata to the local metadata folder
+ err = updater.persistMetadata(metadata.ROOT, updater.cfg.LocalTrustedRoot)
+ if err != nil {
+ return nil, err
+ }
+ // all okay, return the updater instance
+ return updater, nil
+}
+
+// Refresh loads and possibly refreshes top-level metadata.
+// Downloads, verifies, and loads metadata for the top-level roles in the
+// specified order (root -> timestamp -> snapshot -> targets) implementing
+// all the checks required in the TUF client workflow.
+// A Refresh() can be done only once during the lifetime of an Updater.
+// If Refresh() has not been explicitly called before the first
+// GetTargetInfo() call, it will be done implicitly at that time.
+// The metadata for delegated roles is not updated by Refresh():
+// that happens on demand during GetTargetInfo(). However, if the
+// repository uses consistent snapshots (ref. https://theupdateframework.github.io/specification/latest/#consistent-snapshots),
+// then all metadata downloaded by the Updater will use the same consistent repository state.
+//
+// If UnsafeLocalMode is set, no network interaction is performed, only
+// the cached files on disk are used. If the cached data is not complete,
+// this call will fail.
+func (update *Updater) Refresh() error {
+ if update.cfg.UnsafeLocalMode {
+ return update.unsafeLocalRefresh()
+ }
+ return update.onlineRefresh()
+}
+
+// onlineRefresh implements the TUF client workflow as described for
+// the Refresh function.
+func (update *Updater) onlineRefresh() error {
+ err := update.loadRoot()
+ if err != nil {
+ return err
+ }
+ err = update.loadTimestamp()
+ if err != nil {
+ return err
+ }
+ err = update.loadSnapshot()
+ if err != nil {
+ return err
+ }
+ _, err = update.loadTargets(metadata.TARGETS, metadata.ROOT)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// unsafeLocalRefresh tries to load the persisted metadata already cached
+// on disk. Note that this is an unsafe function that deviates from the
+// TUF specification sections 5.3 to 5.7 (update phases).
+// The metadata on disk is nevertheless verified against the provided root,
+// and expiration dates are checked.
+func (update *Updater) unsafeLocalRefresh() error {
+ // Root is already loaded
+ // load timestamp
+ var p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP)
+ data, err := update.loadLocalMetadata(p)
+ if err != nil {
+ return err
+ }
+ _, err = update.trusted.UpdateTimestamp(data)
+ if err != nil {
+ return err
+ }
+
+ // load snapshot
+ p = filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT)
+ data, err = update.loadLocalMetadata(p)
+ if err != nil {
+ return err
+ }
+ _, err = update.trusted.UpdateSnapshot(data, false)
+ if err != nil {
+ return err
+ }
+
+ // targets
+ p = filepath.Join(update.cfg.LocalMetadataDir, metadata.TARGETS)
+ data, err = update.loadLocalMetadata(p)
+ if err != nil {
+ return err
+ }
+ // verify and load the new target metadata
+ _, err = update.trusted.UpdateDelegatedTargets(data, metadata.TARGETS, metadata.ROOT)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// GetTargetInfo returns a metadata.TargetFiles instance with information
+// for targetPath. The return value can be used as an argument to
+// DownloadTarget() and FindCachedTarget().
+// If Refresh() has not been called before calling
+// GetTargetInfo(), the refresh will be done implicitly.
+// As a side-effect this method downloads all the additional (delegated
+// targets) metadata it needs to return the target information.
+func (update *Updater) GetTargetInfo(targetPath string) (*metadata.TargetFiles, error) {
+ // do a Refresh() in case there's no trusted targets.json yet
+ if update.trusted.Targets[metadata.TARGETS] == nil {
+ err := update.Refresh()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return update.preOrderDepthFirstWalk(targetPath)
+}
+
+// DownloadTarget downloads the target file specified by targetFile
+func (update *Updater) DownloadTarget(targetFile *metadata.TargetFiles, filePath, targetBaseURL string) (string, []byte, error) {
+ log := metadata.GetLogger()
+
+ var err error
+ if filePath == "" {
+ filePath, err = update.generateTargetFilePath(targetFile)
+ if err != nil {
+ return "", nil, err
+ }
+ }
+ if targetBaseURL == "" {
+ if update.cfg.RemoteTargetsURL == "" {
+ return "", nil, &metadata.ErrValue{Msg: "targetBaseURL must be set in either DownloadTarget() or the Updater struct"}
+ }
+ targetBaseURL = ensureTrailingSlash(update.cfg.RemoteTargetsURL)
+ } else {
+ targetBaseURL = ensureTrailingSlash(targetBaseURL)
+ }
+
+ targetFilePath := targetFile.Path
+ targetRemotePath := targetFilePath
+ consistentSnapshot := update.trusted.Root.Signed.ConsistentSnapshot
+ if consistentSnapshot && update.cfg.PrefixTargetsWithHash {
+ hashes := ""
+ // get first hex value of hashes
+ for _, v := range targetFile.Hashes {
+ hashes = hex.EncodeToString(v)
+ break
+ }
+ baseName := filepath.Base(targetFilePath)
+ dirName, ok := strings.CutSuffix(targetFilePath, "/"+baseName)
+ if !ok {
+ // <hash>.<target-name>
+ targetRemotePath = fmt.Sprintf("%s.%s", hashes, baseName)
+ } else {
+ // <dir-prefix>/<hash>.<target-name>
+ targetRemotePath = fmt.Sprintf("%s/%s.%s", dirName, hashes, baseName)
+ }
+ }
+ fullURL := fmt.Sprintf("%s%s", targetBaseURL, targetRemotePath)
+ data, err := update.cfg.Fetcher.DownloadFile(fullURL, targetFile.Length, 0)
+ if err != nil {
+ return "", nil, err
+ }
+ err = targetFile.VerifyLengthHashes(data)
+ if err != nil {
+ return "", nil, err
+ }
+
+ // do not persist the target file if cache is disabled
+ if !update.cfg.DisableLocalCache {
+ err = os.WriteFile(filePath, data, 0644)
+ if err != nil {
+ return "", nil, err
+ }
+ }
+ log.Info("Downloaded target", "path", targetFile.Path)
+ return filePath, data, nil
+}
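+// Note, derived from the logic above: with consistent snapshots and
+// PrefixTargetsWithHash enabled, a target "dir/app.tar.gz" whose digest is
+// abc123... is fetched from "<targetBaseURL>dir/abc123....app.tar.gz".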
+
+// FindCachedTarget checks whether a local file is an up-to-date target
+func (update *Updater) FindCachedTarget(targetFile *metadata.TargetFiles, filePath string) (string, []byte, error) {
+ var err error
+ targetFilePath := ""
+ // do not look for cached target file if cache is disabled
+ if update.cfg.DisableLocalCache {
+ return "", nil, nil
+ }
+ // get its path if not provided
+ if filePath == "" {
+ targetFilePath, err = update.generateTargetFilePath(targetFile)
+ if err != nil {
+ return "", nil, err
+ }
+ } else {
+ targetFilePath = filePath
+ }
+ // get file content
+ data, err := os.ReadFile(targetFilePath)
+ if err != nil {
+ // do not want to return err, instead we say that there's no cached target available
+ return "", nil, nil
+ }
+ // verify if the length and hashes of this target file match the expected values
+ err = targetFile.VerifyLengthHashes(data)
+ if err != nil {
+ // do not want to return err, instead we say that there's no cached target available
+ return "", nil, nil
+ }
+ // if all okay, return its path
+ return targetFilePath, data, nil
+}
+
+// loadTimestamp loads local and remote timestamp metadata
+func (update *Updater) loadTimestamp() error {
+ log := metadata.GetLogger()
+ // try to read local timestamp
+ data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.TIMESTAMP))
+ if err != nil {
+ // this means there's no existing local timestamp so we should proceed downloading it without the need to UpdateTimestamp
+ log.Info("Local timestamp does not exist")
+ } else {
+ // local timestamp exists, let's try to verify it and load it to the trusted metadata set
+ _, err := update.trusted.UpdateTimestamp(data)
+ if err != nil {
+ if errors.Is(err, &metadata.ErrRepository{}) {
+ // local timestamp is not valid, proceed downloading from remote; note that this error type includes several other subset errors
+ log.Info("Local timestamp is not valid")
+ } else {
+ // another error
+ return err
+ }
+ } else {
+ // local timestamp exists and is valid; nevertheless proceed with downloading from remote
+ log.Info("Local timestamp is valid")
+ }
+ }
+ // load from remote (whether local load succeeded or not)
+ data, err = update.downloadMetadata(metadata.TIMESTAMP, update.cfg.TimestampMaxLength, "")
+ if err != nil {
+ return err
+ }
+ // try to verify and load the newly downloaded timestamp
+ _, err = update.trusted.UpdateTimestamp(data)
+ if err != nil {
+ if errors.Is(err, &metadata.ErrEqualVersionNumber{}) {
+ // if the new timestamp version is the same as current, discard the
+ // new timestamp; this is normal and it shouldn't raise any error
+ return nil
+ } else {
+ // another error
+ return err
+ }
+ }
+ // proceed with persisting the new timestamp
+ err = update.persistMetadata(metadata.TIMESTAMP, data)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// loadSnapshot loads local (and if needed remote) snapshot metadata
+func (update *Updater) loadSnapshot() error {
+ log := metadata.GetLogger()
+ // try to read local snapshot
+ data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, metadata.SNAPSHOT))
+ if err != nil {
+ // this means there's no existing local snapshot so we should proceed downloading it without the need to UpdateSnapshot
+ log.Info("Local snapshot does not exist")
+ } else {
+ // successfully read a local snapshot metadata, so let's try to verify and load it to the trusted metadata set
+ _, err = update.trusted.UpdateSnapshot(data, true)
+ if err != nil {
+ // this means snapshot verification/loading failed
+ if errors.Is(err, &metadata.ErrRepository{}) {
+ // local snapshot is not valid, proceed downloading from remote; note that this error type includes several other subset errors
+ log.Info("Local snapshot is not valid")
+ } else {
+ // another error
+ return err
+ }
+ } else {
+ // this means snapshot verification/loading succeeded
+ log.Info("Local snapshot is valid: not downloading new one")
+ return nil
+ }
+ }
+ // local snapshot does not exist or is invalid, update from remote
+ log.Info("Failed to load local snapshot")
+ if update.trusted.Timestamp == nil {
+ return fmt.Errorf("trusted timestamp not set")
+ }
+ // extract the snapshot meta from the trusted timestamp metadata
+ snapshotMeta := update.trusted.Timestamp.Signed.Meta[fmt.Sprintf("%s.json", metadata.SNAPSHOT)]
+ // extract the length of the snapshot metadata to be downloaded
+ length := snapshotMeta.Length
+ if length == 0 {
+ length = update.cfg.SnapshotMaxLength
+ }
+ // extract which snapshot version should be downloaded in case of consistent snapshots
+ version := ""
+ if update.trusted.Root.Signed.ConsistentSnapshot {
+ version = strconv.FormatInt(snapshotMeta.Version, 10)
+ }
+ // download snapshot metadata
+ data, err = update.downloadMetadata(metadata.SNAPSHOT, length, version)
+ if err != nil {
+ return err
+ }
+ // verify and load the new snapshot
+ _, err = update.trusted.UpdateSnapshot(data, false)
+ if err != nil {
+ return err
+ }
+ // persist the new snapshot
+ err = update.persistMetadata(metadata.SNAPSHOT, data)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// loadTargets loads local (and if needed remote) metadata for roleName
+func (update *Updater) loadTargets(roleName, parentName string) (*metadata.Metadata[metadata.TargetsType], error) {
+ log := metadata.GetLogger()
+ // avoid loading "roleName" more than once during "GetTargetInfo"
+ role, ok := update.trusted.Targets[roleName]
+ if ok {
+ return role, nil
+ }
+ // try to read local targets
+ data, err := update.loadLocalMetadata(filepath.Join(update.cfg.LocalMetadataDir, roleName))
+ if err != nil {
+ // this means there's no existing local target file so we should proceed downloading it without the need to UpdateDelegatedTargets
+ log.Info("Local role does not exist", "role", roleName)
+ } else {
+ // successfully read a local targets metadata, so let's try to verify and load it to the trusted metadata set
+ delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName)
+ if err != nil {
+ // this means targets verification/loading failed
+ if errors.Is(err, &metadata.ErrRepository{}) {
+ // local target file is not valid, proceed downloading from remote; note that this error type includes several other subset errors
+ log.Info("Local role is not valid", "role", roleName)
+ } else {
+ // another error
+ return nil, err
+ }
+ } else {
+ // this means targets verification/loading succeeded
+ log.Info("Local role is valid: not downloading new one", "role", roleName)
+ return delegatedTargets, nil
+ }
+ }
+ // local "roleName" does not exist or is invalid, update from remote
+ log.Info("Failed to load local role", "role", roleName)
+ if update.trusted.Snapshot == nil {
+ return nil, fmt.Errorf("trusted snapshot not set")
+ }
+ // extract the targets' meta from the trusted snapshot metadata
+ metaInfo, ok := update.trusted.Snapshot.Signed.Meta[fmt.Sprintf("%s.json", roleName)]
+ if !ok {
+ return nil, fmt.Errorf("role %s not found in snapshot", roleName)
+ }
+ // extract the length of the target metadata to be downloaded
+ length := metaInfo.Length
+ if length == 0 {
+ length = update.cfg.TargetsMaxLength
+ }
+ // extract which target metadata version should be downloaded in case of consistent snapshots
+ version := ""
+ if update.trusted.Root.Signed.ConsistentSnapshot {
+ version = strconv.FormatInt(metaInfo.Version, 10)
+ }
+ // download targets metadata
+ data, err = update.downloadMetadata(roleName, length, version)
+ if err != nil {
+ return nil, err
+ }
+ // verify and load the new target metadata
+ delegatedTargets, err := update.trusted.UpdateDelegatedTargets(data, roleName, parentName)
+ if err != nil {
+ return nil, err
+ }
+ // persist the new target metadata
+ err = update.persistMetadata(roleName, data)
+ if err != nil {
+ return nil, err
+ }
+ return delegatedTargets, nil
+}
+
+// loadRoot loads remote root metadata. It sequentially loads and
+// persists on local disk every newer root metadata version
+// available on the remote
+func (update *Updater) loadRoot() error {
+ // calculate boundaries
+ lowerBound := update.trusted.Root.Signed.Version + 1
+ upperBound := lowerBound + update.cfg.MaxRootRotations
+
+ // loop until we find the latest available version of root (download -> verify -> load -> persist)
+ for nextVersion := lowerBound; nextVersion < upperBound; nextVersion++ {
+ data, err := update.downloadMetadata(metadata.ROOT, update.cfg.RootMaxLength, strconv.FormatInt(nextVersion, 10))
+ if err != nil {
+ // downloading the root metadata failed for some reason
+ var tmpErr *metadata.ErrDownloadHTTP
+ if errors.As(err, &tmpErr) {
+ if tmpErr.StatusCode != http.StatusNotFound {
+ // unexpected HTTP status code
+ return err
+ }
+ // 404 means current root is newest available, so we can stop the loop and move forward
+ break
+ }
+ // some other error occurred
+ return err
+ } else {
+ // downloading root metadata succeeded, so let's try to verify and load it
+ _, err = update.trusted.UpdateRoot(data)
+ if err != nil {
+ return err
+ }
+ // persist root metadata to disk
+ err = update.persistMetadata(metadata.ROOT, data)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// preOrderDepthFirstWalk interrogates the tree of target delegations
+// in order of appearance (which implicitly orders them by trustworthiness),
+// and returns the matching target found in the most trusted role.
+func (update *Updater) preOrderDepthFirstWalk(targetFilePath string) (*metadata.TargetFiles, error) {
+ log := metadata.GetLogger()
+ // list of delegations to be interrogated. A (role, parent role) pair
+ // is needed to load and verify the delegated targets metadata
+ delegationsToVisit := []roleParentTuple{{
+ Role: metadata.TARGETS,
+ Parent: metadata.ROOT,
+ }}
+ visitedRoleNames := map[string]bool{}
+ // pre-order depth-first traversal of the graph of target delegations
+ for len(visitedRoleNames) <= update.cfg.MaxDelegations && len(delegationsToVisit) > 0 {
+ // pop the role name from the top of the stack
+ delegation := delegationsToVisit[len(delegationsToVisit)-1]
+ delegationsToVisit = delegationsToVisit[:len(delegationsToVisit)-1]
+ // skip any visited current role to prevent cycles
+ _, ok := visitedRoleNames[delegation.Role]
+ if ok {
+ log.Info("Skipping visited current role", "role", delegation.Role)
+ continue
+ }
+ // the metadata for delegation.Role must be downloaded/updated before
+ // its targets, delegations, and child roles can be inspected
+ targets, err := update.loadTargets(delegation.Role, delegation.Parent)
+ if err != nil {
+ return nil, err
+ }
+ target, ok := targets.Signed.Targets[targetFilePath]
+ if ok {
+ log.Info("Found target in current role", "role", delegation.Role)
+ return target, nil
+ }
+ // after pre-order check, add current role to set of visited roles
+ visitedRoleNames[delegation.Role] = true
+ if targets.Signed.Delegations != nil {
+ var childRolesToVisit []roleParentTuple
+ // note that this may be a slow operation if there are many
+ // delegated roles
+ roles := targets.Signed.Delegations.GetRolesForTarget(targetFilePath)
+ for _, rolesForTarget := range roles {
+ log.Info("Adding child role", "role", rolesForTarget.Name)
+ childRolesToVisit = append(childRolesToVisit, roleParentTuple{Role: rolesForTarget.Name, Parent: delegation.Role})
+ if rolesForTarget.Terminating {
+ log.Info("Not backtracking to other roles")
+ delegationsToVisit = []roleParentTuple{}
+ break
+ }
+ }
+ // push childRolesToVisit in reverse order of appearance
+ // onto delegationsToVisit. Roles are popped from the end of
+ // the list
+ slices.Reverse(childRolesToVisit)
+ delegationsToVisit = append(delegationsToVisit, childRolesToVisit...)
+ }
+ }
+ if len(delegationsToVisit) > 0 {
+ log.Info("Too many roles left to visit for max allowed delegations",
+ "roles-left", len(delegationsToVisit),
+ "allowed-delegations", update.cfg.MaxDelegations)
+ }
+ // if this point is reached, the target was not found in any role
+ return nil, fmt.Errorf("target %s not found", targetFilePath)
+}
+
+// persistMetadata writes metadata to disk atomically to avoid data loss
+func (update *Updater) persistMetadata(roleName string, data []byte) error {
+ log := metadata.GetLogger()
+ // do not persist the metadata if we have disabled local caching
+ if update.cfg.DisableLocalCache {
+ return nil
+ }
+ // caching enabled, proceed with persisting the metadata locally
+ fileName := filepath.Join(update.cfg.LocalMetadataDir, fmt.Sprintf("%s.json", url.PathEscape(roleName)))
+ // create a temporary file
+ file, err := os.CreateTemp(update.cfg.LocalMetadataDir, "tuf_tmp")
+ if err != nil {
+ return err
+ }
+ // change the file permissions to our desired permissions
+ err = file.Chmod(0644)
+ if err != nil {
+ // close and delete the temporary file if there was an error while writing
+ file.Close()
+ errRemove := os.Remove(file.Name())
+ if errRemove != nil {
+ log.Info("Failed to delete temporary file", "name", file.Name())
+ }
+ return err
+ }
+ // write the data content to the temporary file
+ _, err = file.Write(data)
+ if err != nil {
+ // close and delete the temporary file if there was an error while writing
+ file.Close()
+ errRemove := os.Remove(file.Name())
+ if errRemove != nil {
+ log.Info("Failed to delete temporary file", "name", file.Name())
+ }
+ return err
+ }
+
+ // can't move/rename an open file on windows, so close it first
+ err = file.Close()
+ if err != nil {
+ return err
+ }
+ // if all okay, rename the temporary file to the desired one
+ err = os.Rename(file.Name(), fileName)
+ if err != nil {
+ return err
+ }
+ read, err := os.ReadFile(fileName)
+ if err != nil {
+ return err
+ }
+ if string(read) != string(data) {
+ return fmt.Errorf("failed to persist metadata")
+ }
+ return nil
+}
+
+// downloadMetadata downloads a metadata file and returns it as bytes
+func (update *Updater) downloadMetadata(roleName string, length int64, version string) ([]byte, error) {
+ urlPath := ensureTrailingSlash(update.cfg.RemoteMetadataURL)
+ // build urlPath
+ if version == "" {
+ urlPath = fmt.Sprintf("%s%s.json", urlPath, url.PathEscape(roleName))
+ } else {
+ urlPath = fmt.Sprintf("%s%s.%s.json", urlPath, version, url.PathEscape(roleName))
+ }
+ return update.cfg.Fetcher.DownloadFile(urlPath, length, 0)
+}
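+// Illustration of the URL shapes built above: without a version the request
+// is for "<base>/timestamp.json"; with consistent snapshots it is for a
+// versioned file such as "<base>/5.snapshot.json".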
+
+// generateTargetFilePath generates path from TargetFiles
+func (update *Updater) generateTargetFilePath(tf *metadata.TargetFiles) (string, error) {
+ // LocalTargetsDir can be omitted if caching is disabled
+ if update.cfg.LocalTargetsDir == "" && !update.cfg.DisableLocalCache {
+ return "", &metadata.ErrValue{Msg: "LocalTargetsDir must be set if filepath is not given"}
+ }
+ // Use URL encoded target path as filename
+ return filepath.Join(update.cfg.LocalTargetsDir, url.PathEscape(tf.Path)), nil
+}
+
+// loadLocalMetadata reads a local .json file and returns its bytes
+func (update *Updater) loadLocalMetadata(roleName string) ([]byte, error) {
+ return os.ReadFile(fmt.Sprintf("%s.json", roleName))
+}
+
+// GetTopLevelTargets returns the top-level target files
+func (update *Updater) GetTopLevelTargets() map[string]*metadata.TargetFiles {
+ return update.trusted.Targets[metadata.TARGETS].Signed.Targets
+}
+
+// GetTrustedMetadataSet returns the trusted metadata set
+func (update *Updater) GetTrustedMetadataSet() trustedmetadata.TrustedMetadata {
+ return *update.trusted
+}
+
+// UnsafeSetRefTime sets the reference time that the updater uses.
+// This should only be done in tests.
+// Using this function is useful when testing time-related behavior in go-tuf.
+func (update *Updater) UnsafeSetRefTime(t time.Time) {
+ update.trusted.RefTime = t
+}
+
+func IsWindowsPath(path string) bool {
+ match, _ := regexp.MatchString(`^[a-zA-Z]:\\`, path)
+ return match
+}
+
+// ensureTrailingSlash ensures url ends with a slash
+func ensureTrailingSlash(url string) string {
+ if IsWindowsPath(url) {
+ slash := string(filepath.Separator)
+ if strings.HasSuffix(url, slash) {
+ return url
+ }
+ return url + slash
+ }
+ if strings.HasSuffix(url, "/") {
+ return url
+ }
+ return url + "/"
+}
diff --git a/vendor/github.com/transparency-dev/formats/LICENSE b/vendor/github.com/transparency-dev/formats/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/transparency-dev/formats/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/transparency-dev/formats/log/README.md b/vendor/github.com/transparency-dev/formats/log/README.md
new file mode 100644
index 00000000000..887ce47b2a1
--- /dev/null
+++ b/vendor/github.com/transparency-dev/formats/log/README.md
@@ -0,0 +1,127 @@
+# Checkpoint format
+
+This directory contains a description and supporting golang code for
+a reusable Checkpoint format which the TrustFabric team uses in various
+projects.
+
+The format itself is heavily based on the
+[golang sumdb head](https://sum.golang.org/latest), and corresponding
+[signed note](https://pkg.go.dev/golang.org/x/mod/sumdb/note) formats,
+and consists of two parts: a signed envelope, and a body.
+
+### Signed envelope
+
+The envelope (signed note) is of the form:
+
+* One or more non-empty lines, each terminated by `\n` (the `body`)
+* One line consisting of just one `\n` (i.e. a blank line)
+* One or more `signature` lines, each terminated by `\n`
+
+All signatures commit to the body only (including its trailing newline, but not
+the blank line's newline - see below for an example).
+
+The signature(s) themselves are in the sumdb note format (concrete example
+[below](#example)):
+
+`— <key name> <base64 signature>`
+
+where:
+
+* `—` is an emdash (U+2014)
+* `<key name>` gives a human-readable representation of the signing ID
+* `<base64 signature>` is the base64 encoding of the `signature_bytes`
+
+and the `signature_bytes` are prefixed with the first 4 bytes of the SHA256 hash
+of the associated public key to act as a hint in identifying the correct key to
+verify with.
+
+For guidance on generating keys, see the
+[note documentation](https://pkg.go.dev/golang.org/x/mod/sumdb/note#hdr-Generating_Keys)
+and [implementation](https://cs.opensource.google/go/x/mod/+/master:sumdb/note/note.go;l=368;drc=ed3ec21bb8e252814c380df79a80f366440ddb2d).
+Of particular note is that the public key and its hash commit to the algorithm
+identifier.
+
+**Differences from sumdb note:**
+Whereas the golang signed note *implementation* currently supports only Ed25519
+signatures, the format itself is not restricted to this scheme.
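+
+As a concrete, non-normative sketch, the following Go program signs and then
+verifies a note in this envelope format using the
+[note package](https://pkg.go.dev/golang.org/x/mod/sumdb/note); the key name
+`example.com/log42` and the body contents are made-up placeholder values:
+
+```go
+package main
+
+import (
+    "crypto/rand"
+    "fmt"
+
+    "golang.org/x/mod/sumdb/note"
+)
+
+func main() {
+    // Generate a placeholder keypair for a log named "example.com/log42".
+    skey, vkey, err := note.GenerateKey(rand.Reader, "example.com/log42")
+    if err != nil {
+        panic(err)
+    }
+    signer, err := note.NewSigner(skey)
+    if err != nil {
+        panic(err)
+    }
+
+    // The body: one or more non-empty lines, each terminated by "\n".
+    body := "example.com/log42\n42\nqINS1GGlXN65OcbyycIVeCsnYLCmQyE4incWCCQmHvU=\n"
+    msg, err := note.Sign(&note.Note{Text: body}, signer)
+    if err != nil {
+        panic(err)
+    }
+    // msg now holds the body, a blank line, and a "— <key name> <signature>" line.
+    fmt.Printf("%s", msg)
+
+    // Opening the note with the matching verifier checks the signature lines.
+    verifier, err := note.NewVerifier(vkey)
+    if err != nil {
+        panic(err)
+    }
+    if _, err := note.Open(msg, note.VerifierList(verifier)); err != nil {
+        panic(err)
+    }
+}
+```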
+
+### Checkpoint body
+
+The checkpoint body is of the form:
+
+```text
+<origin>
+<size>
+<root hash>
+[otherdata]
+```
+
+The first 3 lines of the body **MUST** be present in all Checkpoints.
+
+* `<origin>` should be a unique identifier for the log identity which issued the checkpoint.
+ The format SHOULD be a URI-like structure like `<domain name>[/<path>]`, where the log operator
+ controls `<domain name>`, e.g. `example.com/log42`. This is only a recommendation, and clients MUST
+ NOT assume that the origin is a URI following this format. This structure reduces the likelihood
+ of origin collision, and gives clues to humans about the log operator and what is in the log. The
+ suffix is optional and can be anything. It is used to disambiguate logs owned under the same
+ prefix.
+
+ The presence of this identifier forms part of the log claim, and guards against two
+ logs producing bytewise identical checkpoints.
+
+* `<size>` is the ASCII decimal representation of the number of leaves committed
+ to by this checkpoint. It should not have leading zeroes.
+
+* `<root hash>` is an
+ [RFC 4648 standard encoding](https://datatracker.ietf.org/doc/html/rfc4648#section-4) base-64
+ representation of the log root hash at the specified log size.
+
+* `[otherdata]` is opaque and optional, and, if necessary, can be used to tie extra
+ data to the checkpoint; however, its format must conform to the sumdb signed
+ note spec (e.g. it must not contain blank lines).
+
+> Note that the golang sumdb implementation is already compatible with this
+`[otherdata]` extension (see the
+[note package](https://pkg.go.dev/golang.org/x/mod/sumdb/note)).
+If you plan to use `otherdata` in your log, see the section on [merging checkpoints](#merging-checkpoints).
+
+The first signature on a checkpoint should be from the log which issued it, but there MUST NOT
+be more than one signature from any given log identity present on the checkpoint.
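+
+For illustration, here is a non-normative sketch that uses the Go `log`
+package from this directory to produce and then parse a signed checkpoint;
+the key, origin, and root hash are placeholder values:
+
+```go
+package main
+
+import (
+    "crypto/rand"
+    "crypto/sha256"
+    "fmt"
+
+    "github.com/transparency-dev/formats/log"
+    "golang.org/x/mod/sumdb/note"
+)
+
+func main() {
+    origin := "example.com/log42"
+    skey, vkey, err := note.GenerateKey(rand.Reader, origin)
+    if err != nil {
+        panic(err)
+    }
+    signer, err := note.NewSigner(skey)
+    if err != nil {
+        panic(err)
+    }
+
+    // Marshal a checkpoint body and wrap it in a signed note envelope.
+    hash := sha256.Sum256([]byte("placeholder root"))
+    cp := log.Checkpoint{Origin: origin, Size: 42, Hash: hash[:]}
+    raw, err := note.Sign(&note.Note{Text: string(cp.Marshal())}, signer)
+    if err != nil {
+        panic(err)
+    }
+
+    // ParseCheckpoint verifies the log signature, unmarshals the body, and
+    // checks that the origin matches the expected one.
+    verifier, err := note.NewVerifier(vkey)
+    if err != nil {
+        panic(err)
+    }
+    parsed, otherData, _, err := log.ParseCheckpoint(raw, origin, verifier)
+    if err != nil {
+        panic(err)
+    }
+    fmt.Printf("size=%d otherData=%q\n", parsed.Size, otherData)
+}
+```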
+
+## Example
+
+An annotated example signed checkpoint in this format is shown below:
+
+```text
+example.com/moonlog
+4027504
+qINS1GGlXN65OcbyycIVeCsnYLCmQyE4incWCCQmHvU=
+0000000061b2f5e1
+Waxing Gibbous
+
+— example.com/moonlog BqFNPlaceholderSigBytes=
+```
+
+(The origin, hash, timestamp, and signature values above are illustrative
+placeholders, not a real signed checkpoint.)
+This checkpoint was issued by the log known as "Moon Log", whose size is
+4027504 entries. In the `other data` section, a timestamp is encoded as a
+64-bit hex value, and further application-specific data relating to the phase
+of the moon at the point the checkpoint was issued follows it.
+
+## Merging Checkpoints
+
+This checkpoint format allows a checkpoint that has been independently signed by
+multiple identities to be merged, creating a single checkpoint with multiple
+signatures. This is particularly useful for witnessing, where witnesses will
+independently check consistency of the log and produce a counter-signed copy
+containing two signatures: one for the log, and one for the witness.
+
+The ability to merge signatures for the same body is a useful optimization.
+Clients that require N witness signatures will not be required to fetch N checkpoints.
+Instead they can fetch a single checkpoint and confirm it has the N required
+signatures (in addition to the log signature).
+
+Note that this optimization requires the checkpoint _body_ to be byte-equivalent.
+The log signature does not need to be equal; when merging, only one of the log's
+signatures over this body will be propagated. The core checkpoint format above
+allows merging for any two consistent checkpoints for the same tree size.
+However, if the `otherdata` extension is used then this can lead to checkpoints
+that cannot be merged, even at the same tree size.
+
+We recommend that log operators using `otherdata` consider carefully what
+information is included in it. If data is included in `otherdata` that is not
+fixed for a given tree size, then this can easily lead to unmergeable checkpoints.
+The most commonly anticipated cause for this would be including the timestamp at
+which the checkpoint was requested within the `otherdata`. In this case, no two
+witnesses are likely to ever acquire the same checkpoint body. There may be cases
+where this is unavoidable, but this consequence should be considered in the design.
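+
+A minimal sketch of the merge operation follows (assuming both inputs are
+well-formed signed notes; a production merger would also deduplicate
+signatures by key hash and re-verify them before emitting the result):
+
+```go
+package main
+
+import (
+    "bytes"
+    "errors"
+    "fmt"
+    "strings"
+)
+
+// mergeCheckpoints merges two signed notes whose bodies are byte-identical by
+// concatenating their signature lines and dropping exact duplicates.
+func mergeCheckpoints(a, b []byte) ([]byte, error) {
+    bodyA, sigsA, okA := bytes.Cut(a, []byte("\n\n"))
+    bodyB, sigsB, okB := bytes.Cut(b, []byte("\n\n"))
+    if !okA || !okB {
+        return nil, errors.New("malformed signed note")
+    }
+    if !bytes.Equal(bodyA, bodyB) {
+        return nil, errors.New("checkpoint bodies differ: cannot merge")
+    }
+    seen := make(map[string]bool)
+    var sigs []string
+    for _, s := range strings.Split(string(sigsA)+string(sigsB), "\n") {
+        if s == "" || seen[s] {
+            continue
+        }
+        seen[s] = true
+        sigs = append(sigs, s)
+    }
+    return []byte(string(bodyA) + "\n\n" + strings.Join(sigs, "\n") + "\n"), nil
+}
+
+func main() {
+    // Placeholder checkpoints: one signed by the log, one counter-signed by a witness.
+    logSigned := []byte("example.com/log42\n42\nE3MoPlaceholder=\n\n— example.com/log42 sig1\n")
+    witSigned := []byte("example.com/log42\n42\nE3MoPlaceholder=\n\n— example.com/log42 sig1\n— witness.example sig2\n")
+    merged, err := mergeCheckpoints(logSigned, witSigned)
+    if err != nil {
+        panic(err)
+    }
+    fmt.Printf("%s", merged)
+}
+```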
diff --git a/vendor/github.com/transparency-dev/formats/log/checkpoint.go b/vendor/github.com/transparency-dev/formats/log/checkpoint.go
new file mode 100644
index 00000000000..3bbaacb3893
--- /dev/null
+++ b/vendor/github.com/transparency-dev/formats/log/checkpoint.go
@@ -0,0 +1,79 @@
+// Copyright 2021 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package log provides basic support for the common log checkpoint and proof
+// format described by the README in this directory.
+package log
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "strconv"
+)
+
+// Checkpoint represents a minimal log checkpoint (STH).
+type Checkpoint struct {
+ // Origin is the string identifying the log which issued this checkpoint.
+ Origin string
+ // Size is the number of entries in the log at this checkpoint.
+ Size uint64
+ // Hash is the hash which commits to the contents of the entire log.
+ Hash []byte
+}
+
+// Marshal returns the common format representation of this Checkpoint.
+func (c Checkpoint) Marshal() []byte {
+ return []byte(fmt.Sprintf("%s\n%d\n%s\n", c.Origin, c.Size, base64.StdEncoding.EncodeToString(c.Hash)))
+}
+
+// Unmarshal parses the common formatted checkpoint data and stores the result
+// in the Checkpoint.
+//
+// The supplied data is expected to begin with the following 3 lines of text,
+// each followed by a newline:
+// - <origin string>
+// - <decimal representation of log size>
+// - <base64 representation of log root hash>
+//
+// Any trailing data after this will be returned.
+func (c *Checkpoint) Unmarshal(data []byte) ([]byte, error) {
+ l := bytes.SplitN(data, []byte("\n"), 4)
+ if len(l) < 4 {
+ return nil, errors.New("invalid checkpoint - too few newlines")
+ }
+ origin := string(l[0])
+ if len(origin) == 0 {
+ return nil, errors.New("invalid checkpoint - empty origin")
+ }
+ size, err := strconv.ParseUint(string(l[1]), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid checkpoint - size invalid: %w", err)
+ }
+ h, err := base64.StdEncoding.DecodeString(string(l[2]))
+ if err != nil {
+ return nil, fmt.Errorf("invalid checkpoint - invalid hash: %w", err)
+ }
+ var rest []byte
+ if len(l[3]) > 0 {
+ rest = l[3]
+ }
+ *c = Checkpoint{
+ Origin: origin,
+ Size: size,
+ Hash: h,
+ }
+ return rest, nil
+}
diff --git a/vendor/github.com/transparency-dev/formats/log/identifier.go b/vendor/github.com/transparency-dev/formats/log/identifier.go
new file mode 100644
index 00000000000..424794541f7
--- /dev/null
+++ b/vendor/github.com/transparency-dev/formats/log/identifier.go
@@ -0,0 +1,30 @@
+// Copyright 2021 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "crypto/sha256"
+ "fmt"
+)
+
+// ID returns the identifier to use for a log given the Origin. This is the ID
+// used to find checkpoints for this log at distributors, and that will be used
+// to feed checkpoints to witnesses.
+func ID(origin string) string {
+ s := sha256.New()
+ s.Write([]byte("o:"))
+ s.Write([]byte(origin))
+ return fmt.Sprintf("%x", s.Sum(nil))
+}
diff --git a/vendor/github.com/transparency-dev/formats/log/note.go b/vendor/github.com/transparency-dev/formats/log/note.go
new file mode 100644
index 00000000000..45ea536841f
--- /dev/null
+++ b/vendor/github.com/transparency-dev/formats/log/note.go
@@ -0,0 +1,56 @@
+// Copyright 2021 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "fmt"
+
+ "golang.org/x/mod/sumdb/note"
+)
+
+// ParseCheckpoint takes a raw checkpoint as bytes and returns a parsed checkpoint
+// and any otherData in the body, providing that:
+// * a valid log signature is found; and
+// * the checkpoint unmarshals correctly; and
+// * the log origin is that expected.
+// In all other cases, an empty checkpoint is returned. The underlying note is always
+// returned where possible.
+// The signatures on the note will include the log signature if no error is returned,
+// plus any signatures from otherVerifiers that were found.
+func ParseCheckpoint(chkpt []byte, origin string, logVerifier note.Verifier, otherVerifiers ...note.Verifier) (*Checkpoint, []byte, *note.Note, error) {
+ vs := append(append(make([]note.Verifier, 0, len(otherVerifiers)+1), logVerifier), otherVerifiers...)
+ verifiers := note.VerifierList(vs...)
+
+ n, err := note.Open(chkpt, verifiers)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to verify signatures on checkpoint: %v", err)
+ }
+
+ for _, s := range n.Sigs {
+ if s.Hash == logVerifier.KeyHash() && s.Name == logVerifier.Name() {
+ // The log has signed this checkpoint. It is now safe to parse.
+ cp := &Checkpoint{}
+ var otherData []byte
+ if otherData, err = cp.Unmarshal([]byte(n.Text)); err != nil {
+ return nil, nil, n, fmt.Errorf("failed to unmarshal checkpoint: %v", err)
+ }
+ if cp.Origin != origin {
+ return nil, nil, n, fmt.Errorf("got Origin %q but expected %q", cp.Origin, origin)
+ }
+ return cp, otherData, n, nil
+ }
+ }
+ return nil, nil, n, fmt.Errorf("no log signature found on note")
+}
diff --git a/vendor/github.com/transparency-dev/merkle/.golangci.yaml b/vendor/github.com/transparency-dev/merkle/.golangci.yaml
new file mode 100644
index 00000000000..0675e1ef409
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/.golangci.yaml
@@ -0,0 +1,11 @@
+run:
+ # timeout for analysis, e.g. 30s, 5m, default is 1m
+ deadline: 90s
+
+linters-settings:
+ depguard:
+ list-type: blacklist
+ packages:
+ - golang.org/x/net/context
+ - github.com/gogo/protobuf/proto
+
diff --git a/vendor/github.com/transparency-dev/merkle/CHANGELOG.md b/vendor/github.com/transparency-dev/merkle/CHANGELOG.md
new file mode 100644
index 00000000000..b05ee0adcbd
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/CHANGELOG.md
@@ -0,0 +1,10 @@
+# MERKLE changelog
+
+## HEAD
+
+## v0.0.2
+ * Fuzzing support
+ * Dependency updates, notably to go1.19
+
+## v0.0.1
+Initial release
diff --git a/vendor/github.com/transparency-dev/merkle/CODEOWNERS b/vendor/github.com/transparency-dev/merkle/CODEOWNERS
new file mode 100644
index 00000000000..1b413465841
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/CODEOWNERS
@@ -0,0 +1 @@
+* @transparency-dev/core-team
diff --git a/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md
new file mode 100644
index 00000000000..43de4c9d470
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**; it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+ own the intellectual property, then you'll need to sign an [individual
+ CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+ then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+Once your CLA is submitted (or if you already submitted one for
+another Google project), make a commit adding yourself to the
+[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
+of your first [pull request][].
+
+[AUTHORS]: AUTHORS
+[CONTRIBUTORS]: CONTRIBUTORS
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+ messages are able to be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/transparency-dev/merkle/LICENSE b/vendor/github.com/transparency-dev/merkle/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/transparency-dev/merkle/README.md b/vendor/github.com/transparency-dev/merkle/README.md
new file mode 100644
index 00000000000..3c8d2127110
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/README.md
@@ -0,0 +1,25 @@
+# Merkle
+
+[Go Reference](https://pkg.go.dev/github.com/transparency-dev/merkle)
+[Go Report Card](https://goreportcard.com/report/github.com/transparency-dev/merkle)
+[Codecov](https://codecov.io/gh/transparency-dev/merkle)
+[Slack](https://gtrillian.slack.com/)
+
+## Overview
+
+This repository contains Go code to help create and manipulate Merkle trees, as
+well as to construct and verify various types of proof.
+
+This is the data structure which is used by projects such as
+[Trillian](https://github.com/google/trillian) to provide
+[verifiable logs](https://transparency.dev/verifiable-data-structures/#verifiable-log).
+
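+As a non-normative usage sketch (with made-up leaf data), the following
+program builds a tiny two-leaf tree using the `compact` package and verifies
+an inclusion proof with the `proof` and `rfc6962` packages:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/transparency-dev/merkle/compact"
+    "github.com/transparency-dev/merkle/proof"
+    "github.com/transparency-dev/merkle/rfc6962"
+)
+
+func main() {
+    hasher := rfc6962.DefaultHasher
+
+    // Hash two leaves and fold them into a compact range to get the root.
+    h0 := hasher.HashLeaf([]byte("leaf 0"))
+    h1 := hasher.HashLeaf([]byte("leaf 1"))
+    rf := compact.RangeFactory{Hash: hasher.HashChildren}
+    cr := rf.NewEmptyRange(0)
+    for _, h := range [][]byte{h0, h1} {
+        if err := cr.Append(h, nil); err != nil {
+            panic(err)
+        }
+    }
+    root, err := cr.GetRootHash(nil)
+    if err != nil {
+        panic(err)
+    }
+
+    // In a 2-leaf tree, the inclusion proof for leaf 0 is just leaf 1's hash.
+    if err := proof.VerifyInclusion(hasher, 0, 2, h0, [][]byte{h1}, root); err != nil {
+        panic(err)
+    }
+    fmt.Printf("verified inclusion under root %x\n", root)
+}
+```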
+
+## Support
+* Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency
+* Slack: https://gtrillian.slack.com/ (invitation)
+
+
+
diff --git a/vendor/github.com/transparency-dev/merkle/cloudbuild.yaml b/vendor/github.com/transparency-dev/merkle/cloudbuild.yaml
new file mode 100644
index 00000000000..eafbc790a08
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/cloudbuild.yaml
@@ -0,0 +1,26 @@
+timeout: 300s
+options:
+ machineType: E2_HIGHCPU_32
+ volumes:
+ - name: go-modules
+ path: /go
+ env:
+ - GOPROXY=https://proxy.golang.org
+ - PROJECT_ROOT=github.com/transparency-dev/merkle
+ - GOPATH=/go
+
+# Cloud Build logs sent to GCS bucket
+logsBucket: 'gs://trillian-cloudbuild-logs'
+
+steps:
+- id: 'lint'
+ name: "golangci/golangci-lint:v1.51"
+ args: ["golangci-lint", "run", "--timeout", "10m"]
+
+- id: 'unit tests'
+ name: 'golang:1.19'
+ args: ['go', 'test', './...']
+
+- id: 'build'
+ name: 'golang:1.19'
+ args: ['go', 'build', './...']
diff --git a/vendor/github.com/transparency-dev/merkle/compact/nodes.go b/vendor/github.com/transparency-dev/merkle/compact/nodes.go
new file mode 100644
index 00000000000..41c0854e483
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/compact/nodes.go
@@ -0,0 +1,89 @@
+// Copyright 2019 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compact
+
+import "math/bits"
+
+// NodeID identifies a node of a Merkle tree.
+//
+// The ID consists of a level and index within this level. Levels are numbered
+// from 0, which corresponds to the tree leaves. Within each level, nodes are
+// numbered with consecutive indices starting from 0.
+//
+// L4: ┌───────0───────┐ ...
+// L3: ┌───0───┐ ┌───1───┐ ┌─── ...
+// L2: ┌─0─┐ ┌─1─┐ ┌─2─┐ ┌─3─┐ ┌─4─┐ ...
+// L1: ┌0┐ ┌1┐ ┌2┐ ┌3┐ ┌4┐ ┌5┐ ┌6┐ ┌7┐ ┌8┐ ┌9┐ ...
+// L0: 0 1 2 3 4 5 6 7 8 9 ... ... ... ... ... ...
+//
+// When the tree is not perfect, the nodes that would complement it to perfect
+// are called ephemeral. Algorithms that operate with ephemeral nodes still map
+// them to the same address space.
+type NodeID struct {
+ Level uint
+ Index uint64
+}
+
+// NewNodeID returns a NodeID with the passed in node coordinates.
+func NewNodeID(level uint, index uint64) NodeID {
+ return NodeID{Level: level, Index: index}
+}
+
+// Parent returns the ID of the parent node.
+func (id NodeID) Parent() NodeID {
+ return NewNodeID(id.Level+1, id.Index>>1)
+}
+
+// Sibling returns the ID of the sibling node.
+func (id NodeID) Sibling() NodeID {
+ return NewNodeID(id.Level, id.Index^1)
+}
+
+// Coverage returns the [begin, end) range of leaves covered by the node.
+func (id NodeID) Coverage() (uint64, uint64) {
+ return id.Index << id.Level, (id.Index + 1) << id.Level
+}
+
+// RangeNodes appends the IDs of the nodes that comprise the [begin, end)
+// compact range to the given slice, and returns the new slice. The caller may
+// pre-allocate space with the help of the RangeSize function.
+func RangeNodes(begin, end uint64, ids []NodeID) []NodeID {
+ left, right := Decompose(begin, end)
+
+ pos := begin
+ // Iterate over perfect subtrees along the left border of the range, ordered
+ // from lower to upper levels.
+ for bit := uint64(0); left != 0; pos, left = pos+bit, left^bit {
+ level := uint(bits.TrailingZeros64(left))
+ bit = uint64(1) << level
+ ids = append(ids, NewNodeID(level, pos>>level))
+ }
+
+ // Iterate over perfect subtrees along the right border of the range, ordered
+ // from upper to lower levels.
+ for bit := uint64(0); right != 0; pos, right = pos+bit, right^bit {
+ level := uint(bits.Len64(right)) - 1
+ bit = uint64(1) << level
+ ids = append(ids, NewNodeID(level, pos>>level))
+ }
+
+ return ids
+}
+
+// RangeSize returns the number of nodes in the [begin, end) compact range.
+func RangeSize(begin, end uint64) int {
+ left, right := Decompose(begin, end)
+ return bits.OnesCount64(left) + bits.OnesCount64(right)
+}
diff --git a/vendor/github.com/transparency-dev/merkle/compact/range.go b/vendor/github.com/transparency-dev/merkle/compact/range.go
new file mode 100644
index 00000000000..0952646c4f6
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/compact/range.go
@@ -0,0 +1,264 @@
+// Copyright 2019 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package compact provides compact Merkle tree data structures.
+package compact
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+)
+
+// HashFn computes an internal node's hash using the hashes of its child nodes.
+type HashFn func(left, right []byte) []byte
+
+// VisitFn visits the node with the specified ID and hash.
+type VisitFn func(id NodeID, hash []byte)
+
+// RangeFactory allows creating compact ranges with the specified hash
+// function, which must not be nil, and must not be changed.
+type RangeFactory struct {
+ Hash HashFn
+}
+
+// NewRange creates a Range for [begin, end) with the given set of hashes. The
+// hashes correspond to the roots of the minimal set of perfect sub-trees
+// covering the [begin, end) leaves range, ordered left to right.
+func (f *RangeFactory) NewRange(begin, end uint64, hashes [][]byte) (*Range, error) {
+ if end < begin {
+ return nil, fmt.Errorf("invalid range: end=%d, want >= %d", end, begin)
+ }
+ if got, want := len(hashes), RangeSize(begin, end); got != want {
+ return nil, fmt.Errorf("invalid hashes: got %d values, want %d", got, want)
+ }
+ return &Range{f: f, begin: begin, end: end, hashes: hashes}, nil
+}
+
+// NewEmptyRange returns a new Range for an empty [begin, begin) range. The
+// value of begin defines where the range will start growing from when entries
+// are appended to it.
+func (f *RangeFactory) NewEmptyRange(begin uint64) *Range {
+ return &Range{f: f, begin: begin, end: begin}
+}
+
+// Range represents a compact Merkle tree range for leaf indices [begin, end).
+//
+// It contains the minimal set of perfect subtrees whose leaves comprise this
+// range. The structure is efficiently mergeable with other compact ranges that
+// share one of the endpoints with it.
+//
+// For more details, see
+// https://github.com/transparency-dev/merkle/blob/main/docs/compact_ranges.md.
+type Range struct {
+ f *RangeFactory
+ begin uint64
+ end uint64
+ hashes [][]byte
+}
+
+// Begin returns the first index covered by the range (inclusive).
+func (r *Range) Begin() uint64 {
+ return r.begin
+}
+
+// End returns the last index covered by the range (exclusive).
+func (r *Range) End() uint64 {
+ return r.end
+}
+
+// Hashes returns sub-tree hashes corresponding to the minimal set of perfect
+// sub-trees covering the [begin, end) range, ordered left to right.
+func (r *Range) Hashes() [][]byte {
+ return r.hashes
+}
+
+// Append extends the compact range by appending the passed in hash to it. It
+// reports all the added nodes through the visitor function (if non-nil).
+func (r *Range) Append(hash []byte, visitor VisitFn) error {
+ if visitor != nil {
+ visitor(NewNodeID(0, r.end), hash)
+ }
+ return r.appendImpl(r.end+1, hash, nil, visitor)
+}
+
+// AppendRange extends the compact range by merging in the other compact range
+// from the right. It uses the tree hasher to calculate hashes of newly created
+// nodes, and reports them through the visitor function (if non-nil).
+func (r *Range) AppendRange(other *Range, visitor VisitFn) error {
+ if other.f != r.f {
+ return errors.New("incompatible ranges")
+ }
+ if got, want := other.begin, r.end; got != want {
+ return fmt.Errorf("ranges are disjoint: other.begin=%d, want %d", got, want)
+ }
+ if len(other.hashes) == 0 { // The other range is empty, merging is trivial.
+ return nil
+ }
+ return r.appendImpl(other.end, other.hashes[0], other.hashes[1:], visitor)
+}
+
+// GetRootHash returns the root hash of the Merkle tree represented by this
+// compact range. Requires the range to start at index 0. If the range is
+// empty, returns nil.
+//
+// If visitor is not nil, it is called with all "ephemeral" nodes (i.e. the
+// ones rooting imperfect subtrees) along the right border of the tree.
+func (r *Range) GetRootHash(visitor VisitFn) ([]byte, error) {
+ if r.begin != 0 {
+ return nil, fmt.Errorf("begin=%d, want 0", r.begin)
+ }
+ ln := len(r.hashes)
+ if ln == 0 {
+ return nil, nil
+ }
+ hash := r.hashes[ln-1]
+ // All non-perfect subtree hashes along the right border of the tree
+ // correspond to the parents of all perfect subtree nodes except the lowest
+ // one (therefore the loop skips it).
+ for i, size := ln-2, r.end; i >= 0; i-- {
+ hash = r.f.Hash(r.hashes[i], hash)
+ if visitor != nil {
+ size &= size - 1 // Delete the previous node.
+ level := uint(bits.TrailingZeros64(size)) + 1 // Compute the parent level.
+ index := size >> level // And its horizontal index.
+ visitor(NewNodeID(level, index), hash)
+ }
+ }
+ return hash, nil
+}
+
+// Equal compares two Ranges for equality.
+func (r *Range) Equal(other *Range) bool {
+ if r.f != other.f || r.begin != other.begin || r.end != other.end {
+ return false
+ }
+ if len(r.hashes) != len(other.hashes) {
+ return false
+ }
+ for i := range r.hashes {
+ if !bytes.Equal(r.hashes[i], other.hashes[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// appendImpl extends the compact range by merging the [r.end, end) compact
+// range into it. The other compact range is decomposed into a seed hash and
+// all the other hashes (possibly none). The method uses the tree hasher to
+// calculate hashes of newly created nodes, and reports them through the
+// visitor function (if non-nil).
+func (r *Range) appendImpl(end uint64, seed []byte, hashes [][]byte, visitor VisitFn) error {
+ // Bits [low, high) of r.end encode the merge path, i.e. the sequence of node
+ // merges that transforms the two compact ranges into one.
+ low, high := getMergePath(r.begin, r.end, end)
+ if high < low {
+ high = low
+ }
+ index := r.end >> low
+ // Now bits [0, high-low) of index encode the merge path.
+
+ // The number of one bits in index is the number of nodes from the left range
+ // that will be merged, and zero bits correspond to the nodes in the right
+ // range. Below we make sure that both ranges have enough hashes, which can
+ // be false only in case the data is corrupted in some way.
+ ones := bits.OnesCount64(index & (1<<(high-low) - 1))
+ if ln := len(r.hashes); ln < ones {
+ return fmt.Errorf("corrupted lhs range: got %d hashes, want >= %d", ln, ones)
+ }
+ if ln, zeros := len(hashes), int(high-low)-ones; ln < zeros {
+ return fmt.Errorf("corrupted rhs range: got %d hashes, want >= %d", ln+1, zeros+1)
+ }
+
+ // Some of the trailing nodes of the left compact range, and some of the
+ // leading nodes of the right range, are sequentially merged with the seed,
+ // according to the mask. All new nodes are reported through the visitor.
+ idx1, idx2 := len(r.hashes), 0
+ for h := low; h < high; h++ {
+ if index&1 == 0 {
+ seed = r.f.Hash(seed, hashes[idx2])
+ idx2++
+ } else {
+ idx1--
+ seed = r.f.Hash(r.hashes[idx1], seed)
+ }
+ index >>= 1
+ if visitor != nil {
+ visitor(NewNodeID(h+1, index), seed)
+ }
+ }
+
+ // All nodes from both ranges that have not been merged are bundled together
+ // with the "merged" seed node.
+ r.hashes = append(append(r.hashes[:idx1], seed), hashes[idx2:]...)
+ r.end = end
+ return nil
+}
+
+// getMergePath returns the merging path between the compact range [begin, mid)
+// and [mid, end). The path is represented as a range of bits within mid, with
+// bit indices [low, high). A bit value of 1 on level i of mid means that the
+// node on this level merges with the corresponding node in the left compact
+// range, whereas 0 represents merging with the right compact range. If the
+// path is empty then high <= low.
+//
+// The output is not specified if begin <= mid <= end doesn't hold, but the
+// function never panics.
+func getMergePath(begin, mid, end uint64) (uint, uint) {
+ low := bits.TrailingZeros64(mid)
+ high := 64
+ if begin != 0 {
+ high = bits.Len64(mid ^ (begin - 1))
+ }
+ if high2 := bits.Len64((mid - 1) ^ end); high2 < high {
+ high = high2
+ }
+ return uint(low), uint(high - 1)
+}
+
+// Decompose splits the [begin, end) range into a minimal number of sub-ranges,
+// each of which is of the form [m * 2^k, (m+1) * 2^k), i.e. of length 2^k, for
+// some integers m, k >= 0.
+//
+// The sequence of sizes is returned encoded as bitmasks left and right, where:
+// - a 1 bit in a bitmask denotes a sub-range of the corresponding size 2^k
+// - left mask bits in LSB-to-MSB order encode the left part of the sequence
+// - right mask bits in MSB-to-LSB order encode the right part
+//
+// The corresponding values of m are not returned (they can be calculated from
+// begin and the sub-range sizes).
+//
+// For example, (begin, end) values of (0b110, 0b11101) would indicate a
+// sequence of tree sizes: 2,8; 8,4,1.
+//
+// The output is not specified if begin > end, but the function never panics.
+func Decompose(begin, end uint64) (uint64, uint64) {
+ // Special case, as the code below works only if begin != 0, or end < 2^63.
+ if begin == 0 {
+ return 0, end
+ }
+ xbegin := begin - 1
+ // Find where paths to leaves #begin-1 and #end diverge, and mask the upper
+ // bits away, as only the nodes strictly below this point are in the range.
+ d := bits.Len64(xbegin^end) - 1
+ mask := uint64(1)<<uint(d) - 1
+ // The left part of the range is encoded by the complement of the masked
+ // begin-1 bits; the right part by the masked end bits.
+ return ^xbegin & mask, end & mask
+}
diff --git a/vendor/github.com/transparency-dev/merkle/proof/proof.go b/vendor/github.com/transparency-dev/merkle/proof/proof.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/proof/proof.go
+// Copyright 2022 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package proof contains helpers for constructing log Merkle tree proofs.
+package proof
+
+import (
+ "fmt"
+ "math/bits"
+
+ "github.com/transparency-dev/merkle/compact"
+)
+
+// Nodes contains information on how to construct a log Merkle tree proof. It
+// supports any proof that has at most one ephemeral node, such as inclusion
+// and consistency proofs defined in RFC 6962.
+type Nodes struct {
+ // IDs contains the IDs of the non-ephemeral nodes sufficient to build the
+ // proof. If an ephemeral node is needed for the proof, it can be recomputed
+ // based on a subset of nodes in this list.
+ IDs []compact.NodeID
+ // begin is the beginning index (inclusive) into IDs of the nodes that are
+ // hashed together to form the ephemeral node.
+ begin int
+ // end is the ending index (exclusive) into IDs of the nodes that are
+ // hashed together to form the ephemeral node.
+ end int
+ // ephem is the ID of the ephemeral node in the proof. The nodes with IDs
+ // in IDs[begin:end] are its child nodes.
+ ephem compact.NodeID
+}
+
+// Inclusion returns the information on how to fetch and construct an
+// inclusion proof for the given leaf in a log Merkle tree of the given size.
+// It requires 0 <= index < size.
+func Inclusion(index, size uint64) (Nodes, error) {
+ if index >= size {
+ return Nodes{}, fmt.Errorf("index %d out of bounds for tree size %d", index, size)
+ }
+ return nodes(index, 0, size).skipFirst(), nil
+}
+
+// Consistency returns the information on how to fetch and construct a
+// consistency proof between the two given tree sizes of a log Merkle tree. It
+// requires 0 <= size1 <= size2.
+func Consistency(size1, size2 uint64) (Nodes, error) {
+ if size1 > size2 {
+ return Nodes{}, fmt.Errorf("tree size %d > %d", size1, size2)
+ }
+ if size1 == size2 || size1 == 0 {
+ return Nodes{IDs: []compact.NodeID{}}, nil
+ }
+
+ // Find the root of the biggest perfect subtree that ends at size1.
+ level := uint(bits.TrailingZeros64(size1))
+ index := (size1 - 1) >> level
+ // The consistency proof consists of this node (except if size1 is a power of
+ // two, in which case adding this node would be redundant because the client
+ // is assumed to know it from a checkpoint), and nodes of the inclusion proof
+ // into this node in the tree of size2.
+ p := nodes(index, level, size2)
+
+ // Handle the case when size1 is a power of 2.
+ if index == 0 {
+ return p.skipFirst(), nil
+ }
+ return p, nil
+}
+
+// nodes returns the node IDs necessary to prove that the (level, index) node
+// is included in the Merkle tree of the given size.
+func nodes(index uint64, level uint, size uint64) Nodes {
+ // Compute the `fork` node, where the path from root to (level, index) node
+ // diverges from the path to (0, size).
+ //
+ // The sibling of this node is the ephemeral node which represents a subtree
+ // that is not complete in the tree of the given size. To compute the hash
+ // of the ephemeral node, we need all the non-ephemeral nodes that cover the
+ // same range of leaves.
+ //
+ // The `inner` variable is how many layers up from (level, index) the `fork`
+ // and the ephemeral nodes are.
+ inner := bits.Len64(index^(size>>level)) - 1
+ fork := compact.NewNodeID(level+uint(inner), index>>inner)
+
+ begin, end := fork.Coverage()
+ left := compact.RangeSize(0, begin)
+ right := compact.RangeSize(end, size)
+
+ node := compact.NewNodeID(level, index)
+ // Pre-allocate the exact number of nodes for the proof, in order:
+ // - The seed node for which we are building the proof.
+ // - The `inner` nodes at each level up to the fork node.
+ // - The `right` nodes, comprising the ephemeral node.
+ // - The `left` nodes, completing the coverage of the whole [0, size) range.
+ nodes := append(make([]compact.NodeID, 0, 1+inner+right+left), node)
+
+ // The first portion of the proof consists of the siblings for nodes of the
+ // path going up to the level at which the ephemeral node appears.
+ for ; node.Level < fork.Level; node = node.Parent() {
+ nodes = append(nodes, node.Sibling())
+ }
+ // This portion of the proof covers the range [begin, end) under it. The
+ // ranges to the left and to the right from it remain to be covered.
+
+ // Add all the nodes (potentially none) that cover the right range, and
+ // represent the ephemeral node. Reverse them so that the Rehash method can
+ // process hashes in the convenient order, from lower to upper levels.
+ len1 := len(nodes)
+ nodes = compact.RangeNodes(end, size, nodes)
+ reverse(nodes[len(nodes)-right:])
+ len2 := len(nodes)
+ // Add the nodes that cover the left range, ordered increasingly by level.
+ nodes = compact.RangeNodes(0, begin, nodes)
+ reverse(nodes[len(nodes)-left:])
+
+ // nodes[len1:len2] contains the nodes representing the ephemeral node. If
+ // it's empty, make it zero. Note that it can also contain a single node.
+ // Depending on the preference of the layer above, it may or may not be
+ // considered ephemeral.
+ if len1 >= len2 {
+ len1, len2 = 0, 0
+ }
+
+ return Nodes{IDs: nodes, begin: len1, end: len2, ephem: fork.Sibling()}
+}
+
+// Ephem returns the ephemeral node, and indices begin and end, such that
+// IDs[begin:end] slice contains the child nodes of the ephemeral node.
+//
+// The list is empty iff there are no ephemeral nodes in the proof. Some
+// examples of when this can happen: a proof in a perfect tree; an inclusion
+// proof for a leaf in a perfect subtree at the right edge of the tree.
+func (n Nodes) Ephem() (compact.NodeID, int, int) {
+ return n.ephem, n.begin, n.end
+}
+
+// Rehash computes the proof based on the slice of node hashes corresponding to
+// their IDs in the n.IDs field. The slices must be of the same length. The hc
+// parameter computes a node's hash based on hashes of its children.
+//
+// Warning: The passed-in slice of hashes can be modified in-place.
+func (n Nodes) Rehash(h [][]byte, hc func(left, right []byte) []byte) ([][]byte, error) {
+ if got, want := len(h), len(n.IDs); got != want {
+ return nil, fmt.Errorf("got %d hashes but expected %d", got, want)
+ }
+ cursor := 0
+ // Scan the list of node hashes, and store the rehashed list in-place.
+ // Invariant: cursor <= i, and h[:cursor] contains all the hashes of the
+ // rehashed list after scanning h up to index i-1.
+ for i, ln := 0, len(h); i < ln; i, cursor = i+1, cursor+1 {
+ hash := h[i]
+ if i >= n.begin && i < n.end {
+ // Scan the block of node hashes that need rehashing.
+ for i++; i < n.end; i++ {
+ hash = hc(h[i], hash)
+ }
+ i--
+ }
+ h[cursor] = hash
+ }
+ return h[:cursor], nil
+}
+
+func (n Nodes) skipFirst() Nodes {
+ n.IDs = n.IDs[1:]
+ // Fixup the indices into the IDs slice.
+ if n.begin < n.end {
+ n.begin--
+ n.end--
+ }
+ return n
+}
+
+func reverse(ids []compact.NodeID) {
+ for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 {
+ ids[i], ids[j] = ids[j], ids[i]
+ }
+}
diff --git a/vendor/github.com/transparency-dev/merkle/proof/verify.go b/vendor/github.com/transparency-dev/merkle/proof/verify.go
new file mode 100644
index 00000000000..d42e1afe36f
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/proof/verify.go
@@ -0,0 +1,176 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proof
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+
+ "github.com/transparency-dev/merkle"
+)
+
+// RootMismatchError occurs when an inclusion proof fails.
+type RootMismatchError struct {
+ ExpectedRoot []byte
+ CalculatedRoot []byte
+}
+
+func (e RootMismatchError) Error() string {
+ return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot)
+}
+
+func verifyMatch(calculated, expected []byte) error {
+ if !bytes.Equal(calculated, expected) {
+ return RootMismatchError{ExpectedRoot: expected, CalculatedRoot: calculated}
+ }
+ return nil
+}
+
+// VerifyInclusion verifies the correctness of the inclusion proof for the leaf
+// with the specified hash and index, relatively to the tree of the given size
+// and root hash. Requires 0 <= index < size.
+func VerifyInclusion(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte, root []byte) error {
+ calcRoot, err := RootFromInclusionProof(hasher, index, size, leafHash, proof)
+ if err != nil {
+ return err
+ }
+ return verifyMatch(calcRoot, root)
+}
+
+// RootFromInclusionProof calculates the expected root hash for a tree of the
+// given size, provided a leaf index and hash with the corresponding inclusion
+// proof. Requires 0 <= index < size.
+func RootFromInclusionProof(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte) ([]byte, error) {
+ if index >= size {
+ return nil, fmt.Errorf("index is beyond size: %d >= %d", index, size)
+ }
+ if got, want := len(leafHash), hasher.Size(); got != want {
+ return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want)
+ }
+
+ inner, border := decompInclProof(index, size)
+ if got, want := len(proof), inner+border; got != want {
+ return nil, fmt.Errorf("wrong proof size %d, want %d", got, want)
+ }
+
+ res := chainInner(hasher, leafHash, proof[:inner], index)
+ res = chainBorderRight(hasher, res, proof[inner:])
+ return res, nil
+}
+
+// VerifyConsistency checks that the passed-in consistency proof is valid
+// between the passed in tree sizes, with respect to the corresponding root
+// hashes. Requires 0 <= size1 <= size2.
+func VerifyConsistency(hasher merkle.LogHasher, size1, size2 uint64, proof [][]byte, root1, root2 []byte) error {
+ switch {
+ case size2 < size1:
+ return fmt.Errorf("size2 (%d) < size1 (%d)", size1, size2)
+ case size1 == size2:
+ if len(proof) > 0 {
+ return errors.New("size1=size2, but proof is not empty")
+ }
+ return verifyMatch(root1, root2)
+ case size1 == 0:
+ // Any size greater than 0 is consistent with size 0.
+ if len(proof) > 0 {
+ return fmt.Errorf("expected empty proof, but got %d components", len(proof))
+ }
+ return nil // Proof OK.
+ case len(proof) == 0:
+ return errors.New("empty proof")
+ }
+
+ inner, border := decompInclProof(size1-1, size2)
+ shift := bits.TrailingZeros64(size1)
+ inner -= shift // Note: shift < inner if size1 < size2.
+
+ // The proof includes the root hash for the sub-tree of size 2^shift.
+ seed, start := proof[0], 1
+ if size1 == 1<<uint(shift) { // Unless size1 is that very 2^shift.
+ seed, start = root1, 0
+ }
+ if got, want := len(proof), start+inner+border; got != want {
+ return fmt.Errorf("wrong proof size %d, want %d", got, want)
+ }
+ proof = proof[start:]
+
+ // Verify the first root.
+ mask := (size1 - 1) >> uint(shift) // Start chaining from level |shift|.
+ hash1 := chainInnerRight(hasher, seed, proof[:inner], mask)
+ hash1 = chainBorderRight(hasher, hash1, proof[inner:])
+ if err := verifyMatch(hash1, root1); err != nil {
+ return err
+ }
+
+ // Verify the second root.
+ hash2 := chainInner(hasher, seed, proof[:inner], mask)
+ hash2 = chainBorderRight(hasher, hash2, proof[inner:])
+ return verifyMatch(hash2, root2)
+}
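+
+// Illustrative usage sketch (assumed values: size1/root1 are a previously
+// observed tree head, size2/root2 the current one, and proofHashes comes from
+// the log being checked):
+//
+// err := VerifyConsistency(rfc6962.DefaultHasher, size1, size2, proofHashes, root1, root2)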
+
+// decompInclProof breaks down inclusion proof for a leaf at the specified
+// |index| in a tree of the specified |size| into 2 components. The splitting
+// point between them is where paths to leaves |index| and |size-1| diverge.
+// Returns lengths of the bottom and upper proof parts correspondingly. The sum
+// of the two determines the correct length of the inclusion proof.
+func decompInclProof(index, size uint64) (int, int) {
+ inner := innerProofSize(index, size)
+ border := bits.OnesCount64(index >> uint(inner))
+ return inner, border
+}
+
+func innerProofSize(index, size uint64) int {
+ return bits.Len64(index ^ (size - 1))
+}
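+
+// Worked example (follows directly from the formulas above): for index=3 in a
+// tree of size=8, inner = bits.Len64(3^7) = bits.Len64(0b100) = 3 and
+// border = bits.OnesCount64(3>>3) = 0, so a valid proof has 3 hashes; for
+// index=4, inner = bits.Len64(4^7) = 2 and border = bits.OnesCount64(4>>2) = 1,
+// again 3 hashes in total.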
+
+// chainInner computes a subtree hash for a node on or below the tree's right
+// border. Assumes |proof| hashes are ordered from lower levels to upper, and
+// |seed| is the initial subtree/leaf hash on the path located at the specified
+// |index| on its level.
+func chainInner(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
+ for i, h := range proof {
+ if (index>>uint(i))&1 == 0 {
+ seed = hasher.HashChildren(seed, h)
+ } else {
+ seed = hasher.HashChildren(h, seed)
+ }
+ }
+ return seed
+}
+
+// chainInnerRight computes a subtree hash like chainInner, but only takes
+// hashes to the left from the path into consideration, which effectively means
+// the result is a hash of the corresponding earlier version of this subtree.
+func chainInnerRight(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
+ for i, h := range proof {
+ if (index>>uint(i))&1 == 1 {
+ seed = hasher.HashChildren(h, seed)
+ }
+ }
+ return seed
+}
+
+// chainBorderRight chains proof hashes along tree borders. This differs from
+// inner chaining because |proof| contains only left-side subtree hashes.
+func chainBorderRight(hasher merkle.LogHasher, seed []byte, proof [][]byte) []byte {
+ for _, h := range proof {
+ seed = hasher.HashChildren(h, seed)
+ }
+ return seed
+}
diff --git a/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go
new file mode 100644
index 00000000000..b04f952ef85
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go
@@ -0,0 +1,68 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rfc6962 provides hashing functionality according to RFC6962.
+package rfc6962
+
+import (
+ "crypto"
+ _ "crypto/sha256" // SHA256 is the default algorithm.
+)
+
+// Domain separation prefixes
+const (
+ RFC6962LeafHashPrefix = 0
+ RFC6962NodeHashPrefix = 1
+)
+
+// DefaultHasher is a SHA256 based LogHasher.
+var DefaultHasher = New(crypto.SHA256)
+
+// Hasher implements the RFC6962 tree hashing algorithm.
+type Hasher struct {
+ crypto.Hash
+}
+
+// New creates a new LogHasher based on the passed-in hash function.
+func New(h crypto.Hash) *Hasher {
+ return &Hasher{Hash: h}
+}
+
+// EmptyRoot returns a special case for an empty tree.
+func (t *Hasher) EmptyRoot() []byte {
+ return t.New().Sum(nil)
+}
+
+// HashLeaf returns the Merkle tree leaf hash of the data passed in through leaf.
+// The data in leaf is prefixed by the LeafHashPrefix.
+func (t *Hasher) HashLeaf(leaf []byte) []byte {
+ h := t.New()
+ h.Write([]byte{RFC6962LeafHashPrefix})
+ h.Write(leaf)
+ return h.Sum(nil)
+}
+
+// HashChildren returns the inner Merkle tree node hash of the two child nodes l and r.
+// The hashed structure is NodeHashPrefix||l||r.
+func (t *Hasher) HashChildren(l, r []byte) []byte {
+ h := t.New()
+ b := append(append(append(
+ make([]byte, 0, 1+len(l)+len(r)),
+ RFC6962NodeHashPrefix),
+ l...),
+ r...)
+
+ h.Write(b)
+ return h.Sum(nil)
+}
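+
+// Known-answer sketch (standard SHA-256 values, stated here for orientation):
+// DefaultHasher.EmptyRoot() is SHA-256 of the empty string,
+// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, and
+// DefaultHasher.HashLeaf(nil) is SHA-256 of the single byte 0x00,
+// 6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d.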
diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go
new file mode 100644
index 00000000000..a0d81858261
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go
@@ -0,0 +1,50 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bson // import "go.mongodb.org/mongo-driver/bson"
+
+import (
+ "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// Zeroer allows custom struct types to report whether they are the zero
+// value. All struct types that don't implement Zeroer, or for which IsZero
+// returns false, are considered to be non-zero.
+type Zeroer interface {
+ IsZero() bool
+}
+
+// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters,
+// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead.
+//
+// A D should not be constructed with duplicate key names, as that can cause undefined server behavior.
+//
+// Example usage:
+//
+// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+type D = primitive.D
+
+// E represents a BSON element for a D. It is usually used inside a D.
+type E = primitive.E
+
+// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not
+// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be
+// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead.
+//
+// Example usage:
+//
+// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
+type M = primitive.M
+
+// An A is an ordered representation of a BSON array.
+//
+// Example usage:
+//
+// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}}
+type A = primitive.A
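+
+// Illustrative sketch (bson.Marshal is defined elsewhere in this package):
+//
+// doc := bson.D{{"name", "alice"}, {"pi", 3.14159}}
+// raw, err := bson.Marshal(doc)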
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go
new file mode 100644
index 00000000000..652aa48b853
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go
@@ -0,0 +1,55 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "reflect"
+
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// ArrayCodec is the Codec used for bsoncore.Array values.
+//
+// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0.
+type ArrayCodec struct{}
+
+var defaultArrayCodec = NewArrayCodec()
+
+// NewArrayCodec returns an ArrayCodec.
+//
+// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See
+// [ArrayCodec] for more details.
+func NewArrayCodec() *ArrayCodec {
+ return &ArrayCodec{}
+}
+
+// EncodeValue is the ValueEncoder for bsoncore.Array values.
+func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tCoreArray {
+ return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
+ }
+
+ arr := val.Interface().(bsoncore.Array)
+ return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr)
+}
+
+// DecodeValue is the ValueDecoder for bsoncore.Array values.
+func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tCoreArray {
+ return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
+ }
+
+ if val.IsNil() {
+ val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+ }
+
+ val.SetLen(0)
+ arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr)
+ val.Set(reflect.ValueOf(arr))
+ return err
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
new file mode 100644
index 00000000000..0693bd432fe
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
@@ -0,0 +1,382 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec"
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+var (
+ emptyValue = reflect.Value{}
+)
+
+// Marshaler is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead.
+type Marshaler interface {
+ MarshalBSON() ([]byte, error)
+}
+
+// ValueMarshaler is an interface implemented by types that can marshal
+// themselves into a BSON value as bytes. The type must be the valid type for
+// the bytes returned. The bytes and byte type together must be valid if the
+// error is nil.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead.
+type ValueMarshaler interface {
+ MarshalBSONValue() (bsontype.Type, []byte, error)
+}
+
+// Unmarshaler is an interface implemented by types that can unmarshal a BSON
+// document representation of themselves. The BSON bytes can be assumed to be
+// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
+// after returning.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead.
+type Unmarshaler interface {
+ UnmarshalBSON([]byte) error
+}
+
+// ValueUnmarshaler is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead.
+type ValueUnmarshaler interface {
+ UnmarshalBSONValue(bsontype.Type, []byte) error
+}
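+
+// Illustrative sketch (hypothetical Point type): a type takes control of its
+// own document encoding by implementing Marshaler:
+//
+// type Point struct{ X, Y int }
+//
+// func (p Point) MarshalBSON() ([]byte, error) {
+//   return bson.Marshal(bson.D{{"x", p.X}, {"y", p.Y}})
+// }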
+
+// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be
+// encoded by the ValueEncoder.
+type ValueEncoderError struct {
+ Name string
+ Types []reflect.Type
+ Kinds []reflect.Kind
+ Received reflect.Value
+}
+
+func (vee ValueEncoderError) Error() string {
+ typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds))
+ for _, t := range vee.Types {
+ typeKinds = append(typeKinds, t.String())
+ }
+ for _, k := range vee.Kinds {
+ if k == reflect.Map {
+ typeKinds = append(typeKinds, "map[string]*")
+ continue
+ }
+ typeKinds = append(typeKinds, k.String())
+ }
+ received := vee.Received.Kind().String()
+ if vee.Received.IsValid() {
+ received = vee.Received.Type().String()
+ }
+ return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received)
+}
+
+// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be
+// decoded by the ValueDecoder.
+type ValueDecoderError struct {
+ Name string
+ Types []reflect.Type
+ Kinds []reflect.Kind
+ Received reflect.Value
+}
+
+func (vde ValueDecoderError) Error() string {
+ typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds))
+ for _, t := range vde.Types {
+ typeKinds = append(typeKinds, t.String())
+ }
+ for _, k := range vde.Kinds {
+ if k == reflect.Map {
+ typeKinds = append(typeKinds, "map[string]*")
+ continue
+ }
+ typeKinds = append(typeKinds, k.String())
+ }
+ received := vde.Received.Kind().String()
+ if vde.Received.IsValid() {
+ received = vde.Received.Type().String()
+ }
+ return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received)
+}
+
+// EncodeContext is the contextual information required for a Codec to encode a
+// value.
+type EncodeContext struct {
+ *Registry
+
+ // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64,
+ // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits)
+ // that can represent the integer value.
+ //
+ // Deprecated: Use bson.Encoder.IntMinSize instead.
+ MinSize bool
+
+ errorOnInlineDuplicates bool
+ stringifyMapKeysWithFmt bool
+ nilMapAsEmpty bool
+ nilSliceAsEmpty bool
+ nilByteSliceAsEmpty bool
+ omitZeroStruct bool
+ useJSONStructTags bool
+}
+
+// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in
+// the marshaled BSON when the "inline" struct tag option is set.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
+func (ec *EncodeContext) ErrorOnInlineDuplicates() {
+ ec.errorOnInlineDuplicates = true
+}
+
+// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name
+// strings using fmt.Sprintf() instead of the default string conversion logic.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead.
+func (ec *EncodeContext) StringifyMapKeysWithFmt() {
+ ec.stringifyMapKeysWithFmt = true
+}
+
+// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON
+// null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead.
+func (ec *EncodeContext) NilMapAsEmpty() {
+ ec.nilMapAsEmpty = true
+}
+
+// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON
+// null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead.
+func (ec *EncodeContext) NilSliceAsEmpty() {
+ ec.nilSliceAsEmpty = true
+}
+
+// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values
+// instead of BSON null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead.
+func (ec *EncodeContext) NilByteSliceAsEmpty() {
+ ec.nilByteSliceAsEmpty = true
+}
+
+// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{})
+// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set.
+//
+// Note that the Encoder only examines exported struct fields when determining if a struct is the
+// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
+func (ec *EncodeContext) OmitZeroStruct() {
+ ec.omitZeroStruct = true
+}
+
+// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson"
+// struct tag is not specified.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead.
+func (ec *EncodeContext) UseJSONStructTags() {
+ ec.useJSONStructTags = true
+}
+
+// DecodeContext is the contextual information required for a Codec to decode a
+// value.
+type DecodeContext struct {
+ *Registry
+
+ // Truncate, if true, instructs decoders to truncate the fractional part of BSON "double"
+ // values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64,
+ // uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to
+ // BSON "decimal128" values.
+ //
+ // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead.
+ Truncate bool
+
+ // Ancestor is the type of a containing document. This is mainly used to determine what type
+ // should be used when decoding an embedded document into an empty interface. For example, if
+ // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
+ // will be decoded into a bson.M.
+ //
+ // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead.
+ Ancestor reflect.Type
+
+ // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the
+ // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is
+ // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an
+ // error. DocumentType overrides the Ancestor field.
+ defaultDocumentType reflect.Type
+
+ binaryAsSlice bool
+ useJSONStructTags bool
+ useLocalTimeZone bool
+ zeroMaps bool
+ zeroStructs bool
+}
+
+// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or
+// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
+func (dc *DecodeContext) BinaryAsSlice() {
+ dc.binaryAsSlice = true
+}
+
+// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson"
+// struct tag is not specified.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
+func (dc *DecodeContext) UseJSONStructTags() {
+ dc.useJSONStructTags = true
+}
+
+// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead
+// of the UTC timezone.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
+func (dc *DecodeContext) UseLocalTimeZone() {
+ dc.useLocalTimeZone = true
+}
+
+// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value
+// passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
+func (dc *DecodeContext) ZeroMaps() {
+ dc.zeroMaps = true
+}
+
+// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination
+// value passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
+func (dc *DecodeContext) ZeroStructs() {
+ dc.zeroStructs = true
+}
+
+// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead.
+func (dc *DecodeContext) DefaultDocumentM() {
+ dc.defaultDocumentType = reflect.TypeOf(primitive.M{})
+}
+
+// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead.
+func (dc *DecodeContext) DefaultDocumentD() {
+ dc.defaultDocumentType = reflect.TypeOf(primitive.D{})
+}
+
+// ValueCodec is an interface for encoding and decoding reflect.Value values.
+//
+// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead.
+type ValueCodec interface {
+ ValueEncoder
+ ValueDecoder
+}
+
+// ValueEncoder is the interface implemented by types that can handle the encoding of a value.
+type ValueEncoder interface {
+ EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+}
+
+// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueEncoder.
+type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+
+// EncodeValue implements the ValueEncoder interface.
+func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ return fn(ec, vw, val)
+}
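+
+// Illustrative sketch: any function with the matching signature can serve as a
+// ValueEncoder, e.g.
+//
+// enc := ValueEncoderFunc(func(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+//   return vw.WriteString(val.String())
+// })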
+
+// ValueDecoder is the interface implemented by types that can handle the decoding of a value.
+type ValueDecoder interface {
+ DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+}
+
+// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueDecoder.
+type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+
+// DecodeValue implements the ValueDecoder interface.
+func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ return fn(dc, vr, val)
+}
+
+// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type.
+type typeDecoder interface {
+ decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
+}
+
+// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder.
+type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
+
+func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ return fn(dc, vr, t)
+}
+
+// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder.
+type decodeAdapter struct {
+ ValueDecoderFunc
+ typeDecoderFunc
+}
+
+var _ ValueDecoder = decodeAdapter{}
+var _ typeDecoder = decodeAdapter{}
+
+// decodeTypeOrValue calls decoder.decodeType if decoder is a typeDecoder. Otherwise, it allocates a new element of type
+// t and calls decoder.DecodeValue on it.
+func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ td, _ := decoder.(typeDecoder)
+ return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true)
+}
+
+func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) {
+ if td != nil {
+ val, err := td.decodeType(dc, vr, t)
+ if err == nil && convert && val.Type() != t {
+ // This conversion step is necessary for slices and maps. If a user declares variables like:
+ //
+ // type myBool bool
+ // var m map[string]myBool
+ //
+ // and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present
+ // because we'll try to assign a value of type bool to one of type myBool.
+ val = val.Convert(t)
+ }
+ return val, err
+ }
+
+ val := reflect.New(t).Elem()
+ err := vd.DecodeValue(dc, vr, val)
+ return val, err
+}
+
+// CodecZeroer is the interface implemented by Codecs that can also determine if
+// a value of the type that would be encoded is zero.
+//
+// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver
+// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to
+// nil instead.
+type CodecZeroer interface {
+ IsTypeZero(interface{}) bool
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
new file mode 100644
index 00000000000..0134b5a94be
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
@@ -0,0 +1,138 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.mongodb.org/mongo-driver/bson/bsonoptions"
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ByteSliceCodec is the Codec used for []byte values.
+//
+// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver
+// 2.0. To configure the byte slice encode and decode behavior, use the
+// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice
+// encode and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to encode nil byte slices as empty
+// BSON binary values, use:
+//
+// opt := options.Client().SetBSONOptions(&options.BSONOptions{
+// NilByteSliceAsEmpty: true,
+// })
+//
+// See the deprecation notice for each field in ByteSliceCodec for the
+// corresponding settings.
+type ByteSliceCodec struct {
+ // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values
+ // instead of BSON null.
+ //
+ // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty
+ // instead.
+ EncodeNilAsEmpty bool
+}
+
+var (
+ defaultByteSliceCodec = NewByteSliceCodec()
+
+ // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be
+ // used by collection type decoders (e.g. map, slice, etc) to set individual values in a
+ // collection.
+ _ typeDecoder = defaultByteSliceCodec
+)
+
+// NewByteSliceCodec returns a ByteSliceCodec with options opts.
+//
+// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See
+// [ByteSliceCodec] for more details.
+func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec {
+ byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...)
+ codec := ByteSliceCodec{}
+ if byteSliceOpt.EncodeNilAsEmpty != nil {
+ codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty
+ }
+ return &codec
+}
+
+// EncodeValue is the ValueEncoder for []byte.
+func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tByteSlice {
+ return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+ }
+ if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty {
+ return vw.WriteNull()
+ }
+ return vw.WriteBinary(val.Interface().([]byte))
+}
+
+func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tByteSlice {
+ return emptyValue, ValueDecoderError{
+ Name: "ByteSliceDecodeValue",
+ Types: []reflect.Type{tByteSlice},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var data []byte
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.String:
+ str, err := vr.ReadString()
+ if err != nil {
+ return emptyValue, err
+ }
+ data = []byte(str)
+ case bsontype.Symbol:
+ sym, err := vr.ReadSymbol()
+ if err != nil {
+ return emptyValue, err
+ }
+ data = []byte(sym)
+ case bsontype.Binary:
+ var subtype byte
+ data, subtype, err = vr.ReadBinary()
+ if err != nil {
+ return emptyValue, err
+ }
+ if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+ return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"}
+ }
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(data), nil
+}
+
+// DecodeValue is the ValueDecoder for []byte.
+func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tByteSlice {
+ return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+ }
+
+ elem, err := bsc.decodeType(dc, vr, tByteSlice)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go
new file mode 100644
index 00000000000..844b50299f2
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go
@@ -0,0 +1,166 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "reflect"
+ "sync"
+ "sync/atomic"
+)
+
+// Runtime check that the kind encoder and decoder caches can store any valid
+// reflect.Kind constant.
+func init() {
+ if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" {
+ panic("The capacity of kindEncoderCache is too small.\n" +
+ "This is due to a new type being added to reflect.Kind.")
+ }
+}
+
+// statically assert array size
+var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer]
+var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer]
+
+type typeEncoderCache struct {
+ cache sync.Map // map[reflect.Type]ValueEncoder
+}
+
+func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) {
+ c.cache.Store(rt, enc)
+}
+
+func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) {
+ if v, _ := c.cache.Load(rt); v != nil {
+ return v.(ValueEncoder), true
+ }
+ return nil, false
+}
+
+func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder {
+ if v, loaded := c.cache.LoadOrStore(rt, enc); loaded {
+ enc = v.(ValueEncoder)
+ }
+ return enc
+}
+
+func (c *typeEncoderCache) Clone() *typeEncoderCache {
+ cc := new(typeEncoderCache)
+ c.cache.Range(func(k, v interface{}) bool {
+ if k != nil && v != nil {
+ cc.cache.Store(k, v)
+ }
+ return true
+ })
+ return cc
+}
+
+type typeDecoderCache struct {
+ cache sync.Map // map[reflect.Type]ValueDecoder
+}
+
+func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) {
+ c.cache.Store(rt, dec)
+}
+
+func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) {
+ if v, _ := c.cache.Load(rt); v != nil {
+ return v.(ValueDecoder), true
+ }
+ return nil, false
+}
+
+func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder {
+ if v, loaded := c.cache.LoadOrStore(rt, dec); loaded {
+ dec = v.(ValueDecoder)
+ }
+ return dec
+}
+
+func (c *typeDecoderCache) Clone() *typeDecoderCache {
+ cc := new(typeDecoderCache)
+ c.cache.Range(func(k, v interface{}) bool {
+ if k != nil && v != nil {
+ cc.cache.Store(k, v)
+ }
+ return true
+ })
+ return cc
+}
+
+// atomic.Value requires that all calls to Store() have the same concrete type
+// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type
+// is always the same (since different concrete types may implement the
+// ValueEncoder interface).
+type kindEncoderCacheEntry struct {
+ enc ValueEncoder
+}
+
+type kindEncoderCache struct {
+ entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry
+}
+
+func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) {
+ if enc != nil && rt < reflect.Kind(len(c.entries)) {
+ c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc})
+ }
+}
+
+func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) {
+ if rt < reflect.Kind(len(c.entries)) {
+ if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok {
+ return ent.enc, ent.enc != nil
+ }
+ }
+ return nil, false
+}
+
+func (c *kindEncoderCache) Clone() *kindEncoderCache {
+ cc := new(kindEncoderCache)
+ for i, v := range c.entries {
+ if val := v.Load(); val != nil {
+ cc.entries[i].Store(val)
+ }
+ }
+ return cc
+}
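+
+// Illustrative usage sketch (internal API; myEnc stands for any ValueEncoder):
+//
+// var c kindEncoderCache
+// c.Store(reflect.String, myEnc)
+// enc, ok := c.Load(reflect.String)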
+
+// atomic.Value requires that all calls to Store() have the same concrete type
+// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type
+// is always the same (since different concrete types may implement the
+// ValueDecoder interface).
+type kindDecoderCacheEntry struct {
+ dec ValueDecoder
+}
+
+type kindDecoderCache struct {
+ entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry
+}
+
+func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) {
+ if rt < reflect.Kind(len(c.entries)) {
+ c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec})
+ }
+}
+
+func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) {
+ if rt < reflect.Kind(len(c.entries)) {
+ if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok {
+ return ent.dec, ent.dec != nil
+ }
+ }
+ return nil, false
+}
+
+func (c *kindDecoderCache) Clone() *kindDecoderCache {
+ cc := new(kindDecoderCache)
+ for i, v := range c.entries {
+ if val := v.Load(); val != nil {
+ cc.entries[i].Store(val)
+ }
+ }
+ return cc
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
new file mode 100644
index 00000000000..cb8180f25cc
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "reflect"
+
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+)
+
+// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder.
+type condAddrEncoder struct {
+ canAddrEnc ValueEncoder
+ elseEnc ValueEncoder
+}
+
+var _ ValueEncoder = (*condAddrEncoder)(nil)
+
+// newCondAddrEncoder returns a condAddrEncoder.
+func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder {
+ encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+ return &encoder
+}
+
+// EncodeValue is the ValueEncoderFunc for a value that may be addressable.
+func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if val.CanAddr() {
+ return cae.canAddrEnc.EncodeValue(ec, vw, val)
+ }
+ if cae.elseEnc != nil {
+ return cae.elseEnc.EncodeValue(ec, vw, val)
+ }
+ return ErrNoEncoder{Type: val.Type()}
+}
+
+// condAddrDecoder is the decoder used when a pointer to the value has a decoder.
+type condAddrDecoder struct {
+ canAddrDec ValueDecoder
+ elseDec ValueDecoder
+}
+
+var _ ValueDecoder = (*condAddrDecoder)(nil)
+
+// newCondAddrDecoder returns a condAddrDecoder.
+func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder {
+ decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec}
+ return &decoder
+}
+
+// DecodeValue is the ValueDecoderFunc for a value that may be addressable.
+func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if val.CanAddr() {
+ return cad.canAddrDec.DecodeValue(dc, vr, val)
+ }
+ if cad.elseDec != nil {
+ return cad.elseDec.DecodeValue(dc, vr, val)
+ }
+ return ErrNoDecoder{Type: val.Type()}
+}
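+
+// Illustrative sketch (hypothetical encoder values): wiring a pointer-receiver
+// encoder with a fallback for values that cannot be addressed:
+//
+// enc := newCondAddrEncoder(ptrReceiverEnc, fallbackEnc)
+// // EncodeValue uses ptrReceiverEnc when val.CanAddr(), fallbackEnc otherwise,
+// // and returns ErrNoEncoder if no fallback was provided.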
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
new file mode 100644
index 00000000000..8702d6d39e0
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
@@ -0,0 +1,1819 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "strconv"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+ "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var (
+ defaultValueDecoders DefaultValueDecoders
+ errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled")
+)
+
+type decodeBinaryError struct {
+ subtype byte
+ typeName string
+}
+
+func (d decodeBinaryError) Error() string {
+ return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype)
+}
+
+func newDefaultStructCodec() *StructCodec {
+ codec, err := NewStructCodec(DefaultStructTagParser)
+ if err != nil {
+ // This function is called from the codec registration path, so errors can't be propagated. If there's an error
+ // constructing the StructCodec, we panic to avoid losing it.
+ panic(fmt.Errorf("error creating default StructCodec: %w", err))
+ }
+ return codec
+}
+
+// DefaultValueDecoders is a namespace type for the default ValueDecoders used
+// when creating a registry.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+type DefaultValueDecoders struct{}
+
+// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
+// the provided RegistryBuilder.
+//
+// There is no support for decoding map[string]interface{} because there is no decoder for
+// interface{}, so users must either register this decoder themselves or use the
+// EmptyInterfaceDecoder available in the bson package.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
+ if rb == nil {
+ panic(errors.New("argument to RegisterDefaultDecoders must not be nil"))
+ }
+
+ intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType}
+ floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType}
+
+ rb.
+ RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)).
+ RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}).
+ RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}).
+ RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}).
+ RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}).
+ RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}).
+ RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}).
+ RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}).
+ RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}).
+ RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}).
+ RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}).
+ RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}).
+ RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec).
+ RegisterTypeDecoder(tTime, defaultTimeCodec).
+ RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec).
+ RegisterTypeDecoder(tCoreArray, defaultArrayCodec).
+ RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}).
+ RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}).
+ RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}).
+ RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}).
+ RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)).
+ RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}).
+ RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}).
+ RegisterDefaultDecoder(reflect.Int, intDecoder).
+ RegisterDefaultDecoder(reflect.Int8, intDecoder).
+ RegisterDefaultDecoder(reflect.Int16, intDecoder).
+ RegisterDefaultDecoder(reflect.Int32, intDecoder).
+ RegisterDefaultDecoder(reflect.Int64, intDecoder).
+ RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec).
+ RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec).
+ RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec).
+ RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec).
+ RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec).
+ RegisterDefaultDecoder(reflect.Float32, floatDecoder).
+ RegisterDefaultDecoder(reflect.Float64, floatDecoder).
+ RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)).
+ RegisterDefaultDecoder(reflect.Map, defaultMapCodec).
+ RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec).
+ RegisterDefaultDecoder(reflect.String, defaultStringCodec).
+ RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()).
+ RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()).
+ RegisterTypeMapEntry(bsontype.Double, tFloat64).
+ RegisterTypeMapEntry(bsontype.String, tString).
+ RegisterTypeMapEntry(bsontype.Array, tA).
+ RegisterTypeMapEntry(bsontype.Binary, tBinary).
+ RegisterTypeMapEntry(bsontype.Undefined, tUndefined).
+ RegisterTypeMapEntry(bsontype.ObjectID, tOID).
+ RegisterTypeMapEntry(bsontype.Boolean, tBool).
+ RegisterTypeMapEntry(bsontype.DateTime, tDateTime).
+ RegisterTypeMapEntry(bsontype.Regex, tRegex).
+ RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer).
+ RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript).
+ RegisterTypeMapEntry(bsontype.Symbol, tSymbol).
+ RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope).
+ RegisterTypeMapEntry(bsontype.Int32, tInt32).
+ RegisterTypeMapEntry(bsontype.Int64, tInt64).
+ RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp).
+ RegisterTypeMapEntry(bsontype.Decimal128, tDecimal).
+ RegisterTypeMapEntry(bsontype.MinKey, tMinKey).
+ RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey).
+ RegisterTypeMapEntry(bsontype.Type(0), tD).
+ RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD).
+ RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)).
+ RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue))
+}
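+
+// Illustrative sketch (legacy, deprecated API): building a registry with the
+// default decoders (and, symmetrically, DefaultValueEncoders) registered:
+//
+// rb := NewRegistryBuilder()
+// DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+// DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+// reg := rb.Build()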
+
+// DDecodeValue is the ValueDecoderFunc for primitive.D instances.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.IsValid() || !val.CanSet() || val.Type() != tD {
+ return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+ }
+
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Type(0), bsontype.EmbeddedDocument:
+ dc.Ancestor = tD
+ case bsontype.Null:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadNull()
+ default:
+ return fmt.Errorf("cannot decode %v into a primitive.D", vrType)
+ }
+
+ dr, err := vr.ReadDocument()
+ if err != nil {
+ return err
+ }
+
+ decoder, err := dc.LookupDecoder(tEmpty)
+ if err != nil {
+ return err
+ }
+ tEmptyTypeDecoder, _ := decoder.(typeDecoder)
+
+ // Use the elements in the provided value if it's non-nil. Otherwise, allocate a new D instance.
+ var elems primitive.D
+ if !val.IsNil() {
+ val.SetLen(0)
+ elems = val.Interface().(primitive.D)
+ } else {
+ elems = make(primitive.D, 0)
+ }
+
+ for {
+ key, elemVr, err := dr.ReadElement()
+ if errors.Is(err, bsonrw.ErrEOD) {
+ break
+ } else if err != nil {
+ return err
+ }
+
+ // Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty.
+ elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false)
+ if err != nil {
+ return err
+ }
+
+ elems = append(elems, primitive.E{Key: key, Value: elem.Interface()})
+ }
+
+ val.Set(reflect.ValueOf(elems))
+ return nil
+}
+
+func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t.Kind() != reflect.Bool {
+ return emptyValue, ValueDecoderError{
+ Name: "BooleanDecodeValue",
+ Kinds: []reflect.Kind{reflect.Bool},
+ Received: reflect.Zero(t),
+ }
+ }
+
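+	// Non-boolean BSON values are coerced: numeric values decode to true when
+	// non-zero, and Null/Undefined decode to false.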
+ var b bool
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Int32:
+ i32, err := vr.ReadInt32()
+ if err != nil {
+ return emptyValue, err
+ }
+ b = (i32 != 0)
+ case bsontype.Int64:
+ i64, err := vr.ReadInt64()
+ if err != nil {
+ return emptyValue, err
+ }
+ b = (i64 != 0)
+ case bsontype.Double:
+ f64, err := vr.ReadDouble()
+ if err != nil {
+ return emptyValue, err
+ }
+ b = (f64 != 0)
+ case bsontype.Boolean:
+ b, err = vr.ReadBoolean()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(b), nil
+}
+
+// BooleanDecodeValue is the ValueDecoderFunc for bool types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool {
+ return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+ }
+
+ elem, err := dvd.booleanDecodeType(dctx, vr, val.Type())
+ if err != nil {
+ return err
+ }
+
+ val.SetBool(elem.Bool())
+ return nil
+}
+
+func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ var i64 int64
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Int32:
+ i32, err := vr.ReadInt32()
+ if err != nil {
+ return emptyValue, err
+ }
+ i64 = int64(i32)
+ case bsontype.Int64:
+ i64, err = vr.ReadInt64()
+ if err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Double:
+ f64, err := vr.ReadDouble()
+ if err != nil {
+ return emptyValue, err
+ }
+ if !dc.Truncate && math.Floor(f64) != f64 {
+ return emptyValue, errCannotTruncate
+ }
+		if f64 > float64(math.MaxInt64) || f64 < float64(math.MinInt64) {
+ return emptyValue, fmt.Errorf("%g overflows int64", f64)
+ }
+ i64 = int64(f64)
+ case bsontype.Boolean:
+ b, err := vr.ReadBoolean()
+ if err != nil {
+ return emptyValue, err
+ }
+ if b {
+ i64 = 1
+ }
+ case bsontype.Null:
+ if err = vr.ReadNull(); err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Undefined:
+ if err = vr.ReadUndefined(); err != nil {
+ return emptyValue, err
+ }
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType)
+ }
+
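+	// The BSON value has been widened to an int64; narrow it to the requested
+	// kind, rejecting values that would overflow.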
+ switch t.Kind() {
+ case reflect.Int8:
+ if i64 < math.MinInt8 || i64 > math.MaxInt8 {
+ return emptyValue, fmt.Errorf("%d overflows int8", i64)
+ }
+
+ return reflect.ValueOf(int8(i64)), nil
+ case reflect.Int16:
+ if i64 < math.MinInt16 || i64 > math.MaxInt16 {
+ return emptyValue, fmt.Errorf("%d overflows int16", i64)
+ }
+
+ return reflect.ValueOf(int16(i64)), nil
+ case reflect.Int32:
+ if i64 < math.MinInt32 || i64 > math.MaxInt32 {
+ return emptyValue, fmt.Errorf("%d overflows int32", i64)
+ }
+
+ return reflect.ValueOf(int32(i64)), nil
+ case reflect.Int64:
+ return reflect.ValueOf(i64), nil
+ case reflect.Int:
+		if i64 < math.MinInt || i64 > math.MaxInt { // Can we fit this inside of an int?
+ return emptyValue, fmt.Errorf("%d overflows int", i64)
+ }
+
+ return reflect.ValueOf(int(i64)), nil
+ default:
+ return emptyValue, ValueDecoderError{
+ Name: "IntDecodeValue",
+ Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+ Received: reflect.Zero(t),
+ }
+ }
+}
+
+// IntDecodeValue is the ValueDecoderFunc for int types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() {
+ return ValueDecoderError{
+ Name: "IntDecodeValue",
+ Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+ Received: val,
+ }
+ }
+
+ elem, err := dvd.intDecodeType(dc, vr, val.Type())
+ if err != nil {
+ return err
+ }
+
+ val.SetInt(elem.Int())
+ return nil
+}
+
+// UintDecodeValue is the ValueDecoderFunc for uint types.
+//
+// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ var i64 int64
+ var err error
+ switch vr.Type() {
+ case bsontype.Int32:
+ i32, err := vr.ReadInt32()
+ if err != nil {
+ return err
+ }
+ i64 = int64(i32)
+ case bsontype.Int64:
+ i64, err = vr.ReadInt64()
+ if err != nil {
+ return err
+ }
+ case bsontype.Double:
+ f64, err := vr.ReadDouble()
+ if err != nil {
+ return err
+ }
+ if !dc.Truncate && math.Floor(f64) != f64 {
+ return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled")
+ }
+		if f64 > float64(math.MaxInt64) || f64 < float64(math.MinInt64) {
+ return fmt.Errorf("%g overflows int64", f64)
+ }
+ i64 = int64(f64)
+ case bsontype.Boolean:
+ b, err := vr.ReadBoolean()
+ if err != nil {
+ return err
+ }
+ if b {
+ i64 = 1
+ }
+ default:
+ return fmt.Errorf("cannot decode %v into an integer type", vr.Type())
+ }
+
+ if !val.CanSet() {
+ return ValueDecoderError{
+ Name: "UintDecodeValue",
+ Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+ Received: val,
+ }
+ }
+
+ switch val.Kind() {
+ case reflect.Uint8:
+ if i64 < 0 || i64 > math.MaxUint8 {
+ return fmt.Errorf("%d overflows uint8", i64)
+ }
+ case reflect.Uint16:
+ if i64 < 0 || i64 > math.MaxUint16 {
+ return fmt.Errorf("%d overflows uint16", i64)
+ }
+ case reflect.Uint32:
+ if i64 < 0 || i64 > math.MaxUint32 {
+ return fmt.Errorf("%d overflows uint32", i64)
+ }
+ case reflect.Uint64:
+ if i64 < 0 {
+ return fmt.Errorf("%d overflows uint64", i64)
+ }
+ case reflect.Uint:
+ if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside of an uint
+ return fmt.Errorf("%d overflows uint", i64)
+ }
+ default:
+ return ValueDecoderError{
+ Name: "UintDecodeValue",
+ Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+ Received: val,
+ }
+ }
+
+ val.SetUint(uint64(i64))
+ return nil
+}
+
+func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ var f float64
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Int32:
+ i32, err := vr.ReadInt32()
+ if err != nil {
+ return emptyValue, err
+ }
+ f = float64(i32)
+ case bsontype.Int64:
+ i64, err := vr.ReadInt64()
+ if err != nil {
+ return emptyValue, err
+ }
+ f = float64(i64)
+ case bsontype.Double:
+ f, err = vr.ReadDouble()
+ if err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Boolean:
+ b, err := vr.ReadBoolean()
+ if err != nil {
+ return emptyValue, err
+ }
+ if b {
+ f = 1
+ }
+ case bsontype.Null:
+ if err = vr.ReadNull(); err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Undefined:
+ if err = vr.ReadUndefined(); err != nil {
+ return emptyValue, err
+ }
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType)
+ }
+
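+	// Narrow to the requested float kind. Converting to float32 is only
+	// allowed when the conversion is lossless or truncation is enabled.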
+ switch t.Kind() {
+ case reflect.Float32:
+ if !dc.Truncate && float64(float32(f)) != f {
+ return emptyValue, errCannotTruncate
+ }
+
+ return reflect.ValueOf(float32(f)), nil
+ case reflect.Float64:
+ return reflect.ValueOf(f), nil
+ default:
+ return emptyValue, ValueDecoderError{
+ Name: "FloatDecodeValue",
+ Kinds: []reflect.Kind{reflect.Float32, reflect.Float64},
+ Received: reflect.Zero(t),
+ }
+ }
+}
+
+// FloatDecodeValue is the ValueDecoderFunc for float types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() {
+ return ValueDecoderError{
+ Name: "FloatDecodeValue",
+ Kinds: []reflect.Kind{reflect.Float32, reflect.Float64},
+ Received: val,
+ }
+ }
+
+ elem, err := dvd.floatDecodeType(ec, vr, val.Type())
+ if err != nil {
+ return err
+ }
+
+ val.SetFloat(elem.Float())
+ return nil
+}
+
+// StringDecodeValue is the ValueDecoderFunc for string types.
+//
+// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ var str string
+ var err error
+ switch vr.Type() {
+ // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed.
+ case bsontype.String:
+ str, err = vr.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("cannot decode %v into a string type", vr.Type())
+ }
+ if !val.CanSet() || val.Kind() != reflect.String {
+ return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
+ }
+
+ val.SetString(str)
+ return nil
+}
+
+func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tJavaScript {
+ return emptyValue, ValueDecoderError{
+ Name: "JavaScriptDecodeValue",
+ Types: []reflect.Type{tJavaScript},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var js string
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.JavaScript:
+ js, err = vr.ReadJavascript()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.JavaScript(js)), nil
+}
+
+// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tJavaScript {
+ return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
+ }
+
+ elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript)
+ if err != nil {
+ return err
+ }
+
+ val.SetString(elem.String())
+ return nil
+}
+
+func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tSymbol {
+ return emptyValue, ValueDecoderError{
+ Name: "SymbolDecodeValue",
+ Types: []reflect.Type{tSymbol},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var symbol string
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.String:
+ symbol, err = vr.ReadString()
+ case bsontype.Symbol:
+ symbol, err = vr.ReadSymbol()
+ case bsontype.Binary:
+ data, subtype, err := vr.ReadBinary()
+ if err != nil {
+ return emptyValue, err
+ }
+
+ if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+ return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"}
+ }
+ symbol = string(data)
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.Symbol(symbol)), nil
+}
+
+// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tSymbol {
+ return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val}
+ }
+
+ elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol)
+ if err != nil {
+ return err
+ }
+
+ val.SetString(elem.String())
+ return nil
+}
+
+func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tBinary {
+ return emptyValue, ValueDecoderError{
+ Name: "BinaryDecodeValue",
+ Types: []reflect.Type{tBinary},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var data []byte
+ var subtype byte
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Binary:
+ data, subtype, err = vr.ReadBinary()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil
+}
+
+// BinaryDecodeValue is the ValueDecoderFunc for Binary.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tBinary {
+ return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val}
+ }
+
+ elem, err := dvd.binaryDecodeType(dc, vr, tBinary)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tUndefined {
+ return emptyValue, ValueDecoderError{
+ Name: "UndefinedDecodeValue",
+ Types: []reflect.Type{tUndefined},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ default:
+		return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.Undefined{}), nil
+}
+
+// UndefinedDecodeValue is the ValueDecoderFunc for Undefined.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tUndefined {
+ return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val}
+ }
+
+ elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+// Accept both the raw 12-byte string form and the pretty-printed 24-character hex string form.
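+// For example, both the 24-character hex string "507f1f77bcf86cd799439011"
+// and the corresponding raw 12-byte string decode to the same ObjectID.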
+func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tOID {
+ return emptyValue, ValueDecoderError{
+ Name: "ObjectIDDecodeValue",
+ Types: []reflect.Type{tOID},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var oid primitive.ObjectID
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.ObjectID:
+ oid, err = vr.ReadObjectID()
+ if err != nil {
+ return emptyValue, err
+ }
+ case bsontype.String:
+ str, err := vr.ReadString()
+ if err != nil {
+ return emptyValue, err
+ }
+ if oid, err = primitive.ObjectIDFromHex(str); err == nil {
+ break
+ }
+ if len(str) != 12 {
+ return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str))
+ }
+ byteArr := []byte(str)
+ copy(oid[:], byteArr)
+ case bsontype.Null:
+ if err = vr.ReadNull(); err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Undefined:
+ if err = vr.ReadUndefined(); err != nil {
+ return emptyValue, err
+ }
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType)
+ }
+
+ return reflect.ValueOf(oid), nil
+}
+
+// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tOID {
+ return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val}
+ }
+
+ elem, err := dvd.objectIDDecodeType(dc, vr, tOID)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tDateTime {
+ return emptyValue, ValueDecoderError{
+ Name: "DateTimeDecodeValue",
+ Types: []reflect.Type{tDateTime},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var dt int64
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.DateTime:
+ dt, err = vr.ReadDateTime()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.DateTime(dt)), nil
+}
+
+// DateTimeDecodeValue is the ValueDecoderFunc for DateTime.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tDateTime {
+ return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val}
+ }
+
+ elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tNull {
+ return emptyValue, ValueDecoderError{
+ Name: "NullDecodeValue",
+ Types: []reflect.Type{tNull},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a Null", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.Null{}), nil
+}
+
+// NullDecodeValue is the ValueDecoderFunc for Null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tNull {
+ return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val}
+ }
+
+ elem, err := dvd.nullDecodeType(dc, vr, tNull)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tRegex {
+ return emptyValue, ValueDecoderError{
+ Name: "RegexDecodeValue",
+ Types: []reflect.Type{tRegex},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var pattern, options string
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Regex:
+ pattern, options, err = vr.ReadRegex()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil
+}
+
+// RegexDecodeValue is the ValueDecoderFunc for Regex.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tRegex {
+ return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val}
+ }
+
+ elem, err := dvd.regexDecodeType(dc, vr, tRegex)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tDBPointer {
+ return emptyValue, ValueDecoderError{
+ Name: "DBPointerDecodeValue",
+ Types: []reflect.Type{tDBPointer},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var ns string
+ var pointer primitive.ObjectID
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.DBPointer:
+ ns, pointer, err = vr.ReadDBPointer()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil
+}
+
+// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tDBPointer {
+ return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
+ }
+
+ elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) {
+ if reflectType != tTimestamp {
+ return emptyValue, ValueDecoderError{
+ Name: "TimestampDecodeValue",
+ Types: []reflect.Type{tTimestamp},
+ Received: reflect.Zero(reflectType),
+ }
+ }
+
+ var t, incr uint32
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Timestamp:
+ t, incr, err = vr.ReadTimestamp()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil
+}
+
+// TimestampDecodeValue is the ValueDecoderFunc for Timestamp.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tTimestamp {
+ return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
+ }
+
+ elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tMinKey {
+ return emptyValue, ValueDecoderError{
+ Name: "MinKeyDecodeValue",
+ Types: []reflect.Type{tMinKey},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.MinKey:
+ err = vr.ReadMinKey()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.MinKey{}), nil
+}
+
+// MinKeyDecodeValue is the ValueDecoderFunc for MinKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tMinKey {
+ return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val}
+ }
+
+ elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tMaxKey {
+ return emptyValue, ValueDecoderError{
+ Name: "MaxKeyDecodeValue",
+ Types: []reflect.Type{tMaxKey},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.MaxKey:
+ err = vr.ReadMaxKey()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(primitive.MaxKey{}), nil
+}
+
+// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tMaxKey {
+ return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
+ }
+
+ elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tDecimal {
+ return emptyValue, ValueDecoderError{
+ Name: "Decimal128DecodeValue",
+ Types: []reflect.Type{tDecimal},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var d128 primitive.Decimal128
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Decimal128:
+ d128, err = vr.ReadDecimal128()
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(d128), nil
+}
+
+// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tDecimal {
+ return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+ }
+
+ elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tJSONNumber {
+ return emptyValue, ValueDecoderError{
+ Name: "JSONNumberDecodeValue",
+ Types: []reflect.Type{tJSONNumber},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var jsonNum json.Number
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Double:
+ f64, err := vr.ReadDouble()
+ if err != nil {
+ return emptyValue, err
+ }
+ jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64))
+ case bsontype.Int32:
+ i32, err := vr.ReadInt32()
+ if err != nil {
+ return emptyValue, err
+ }
+ jsonNum = json.Number(strconv.FormatInt(int64(i32), 10))
+ case bsontype.Int64:
+ i64, err := vr.ReadInt64()
+ if err != nil {
+ return emptyValue, err
+ }
+ jsonNum = json.Number(strconv.FormatInt(i64, 10))
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(jsonNum), nil
+}
+
+// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tJSONNumber {
+ return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+ }
+
+ elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tURL {
+ return emptyValue, ValueDecoderError{
+ Name: "URLDecodeValue",
+ Types: []reflect.Type{tURL},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ urlPtr := &url.URL{}
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.String:
+ var str string // Declare str here to avoid shadowing err during the ReadString call.
+ str, err = vr.ReadString()
+ if err != nil {
+ return emptyValue, err
+ }
+
+ urlPtr, err = url.Parse(str)
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(urlPtr).Elem(), nil
+}
+
+// URLDecodeValue is the ValueDecoderFunc for url.URL.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tURL {
+ return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val}
+ }
+
+ elem, err := dvd.urlDecodeType(dc, vr, tURL)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+// TimeDecodeValue is the ValueDecoderFunc for time.Time.
+//
+// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if vr.Type() != bsontype.DateTime {
+ return fmt.Errorf("cannot decode %v into a time.Time", vr.Type())
+ }
+
+ dt, err := vr.ReadDateTime()
+ if err != nil {
+ return err
+ }
+
+ if !val.CanSet() || val.Type() != tTime {
+ return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
+ }
+
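+	// dt is milliseconds since the Unix epoch; split it into seconds and
+	// nanoseconds for time.Unix.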
+ val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC()))
+ return nil
+}
+
+// ByteSliceDecodeValue is the ValueDecoderFunc for []byte.
+//
+// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null {
+ return fmt.Errorf("cannot decode %v into a []byte", vr.Type())
+ }
+
+ if !val.CanSet() || val.Type() != tByteSlice {
+ return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+ }
+
+ if vr.Type() == bsontype.Null {
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadNull()
+ }
+
+ data, subtype, err := vr.ReadBinary()
+ if err != nil {
+ return err
+ }
+ if subtype != 0x00 {
+ return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype)
+ }
+
+ val.Set(reflect.ValueOf(data))
+ return nil
+}
+
+// MapDecodeValue is the ValueDecoderFunc for map[string]* types.
+//
+// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+ return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+ }
+
+ switch vr.Type() {
+ case bsontype.Type(0), bsontype.EmbeddedDocument:
+ case bsontype.Null:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadNull()
+ default:
+ return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type())
+ }
+
+ dr, err := vr.ReadDocument()
+ if err != nil {
+ return err
+ }
+
+ if val.IsNil() {
+ val.Set(reflect.MakeMap(val.Type()))
+ }
+
+ eType := val.Type().Elem()
+ decoder, err := dc.LookupDecoder(eType)
+ if err != nil {
+ return err
+ }
+
+ if eType == tEmpty {
+ dc.Ancestor = val.Type()
+ }
+
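+	// Keys are read as strings and converted to the map's key type, which the
+	// check above guarantees has string as its underlying kind.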
+ keyType := val.Type().Key()
+ for {
+ key, vr, err := dr.ReadElement()
+ if errors.Is(err, bsonrw.ErrEOD) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ elem := reflect.New(eType).Elem()
+
+ err = decoder.DecodeValue(dc, vr, elem)
+ if err != nil {
+ return err
+ }
+
+ val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem)
+ }
+ return nil
+}
+
+// ArrayDecodeValue is the ValueDecoderFunc for array types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.IsValid() || val.Kind() != reflect.Array {
+ return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+ }
+
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Array:
+ case bsontype.Type(0), bsontype.EmbeddedDocument:
+ if val.Type().Elem() != tE {
+ return fmt.Errorf("cannot decode document into %s", val.Type())
+ }
+ case bsontype.Binary:
+ if val.Type().Elem() != tByte {
+			return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got element type %s", val.Type().Elem())
+ }
+ data, subtype, err := vr.ReadBinary()
+ if err != nil {
+ return err
+ }
+ if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+ return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype)
+ }
+
+ if len(data) > val.Len() {
+ return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type())
+ }
+
+ for idx, elem := range data {
+ val.Index(idx).Set(reflect.ValueOf(elem))
+ }
+ return nil
+ case bsontype.Null:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadNull()
+ case bsontype.Undefined:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadUndefined()
+ default:
+ return fmt.Errorf("cannot decode %v into an array", vrType)
+ }
+
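+	// Arrays whose element type is primitive.E are really documents, so they
+	// are decoded element-wise as a D; all other element types take the
+	// default array path.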
+ var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+ switch val.Type().Elem() {
+ case tE:
+ elemsFunc = dvd.decodeD
+ default:
+ elemsFunc = dvd.decodeDefault
+ }
+
+ elems, err := elemsFunc(dc, vr, val)
+ if err != nil {
+ return err
+ }
+
+ if len(elems) > val.Len() {
+ return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems))
+ }
+
+ for idx, elem := range elems {
+ val.Index(idx).Set(elem)
+ }
+
+ return nil
+}
+
+// SliceDecodeValue is the ValueDecoderFunc for slice types.
+//
+// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Kind() != reflect.Slice {
+ return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+ }
+
+ switch vr.Type() {
+ case bsontype.Array:
+ case bsontype.Null:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadNull()
+ case bsontype.Type(0), bsontype.EmbeddedDocument:
+ if val.Type().Elem() != tE {
+ return fmt.Errorf("cannot decode document into %s", val.Type())
+ }
+ default:
+ return fmt.Errorf("cannot decode %v into a slice", vr.Type())
+ }
+
+ var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+ switch val.Type().Elem() {
+ case tE:
+ dc.Ancestor = val.Type()
+ elemsFunc = dvd.decodeD
+ default:
+ elemsFunc = dvd.decodeDefault
+ }
+
+ elems, err := elemsFunc(dc, vr, val)
+ if err != nil {
+ return err
+ }
+
+ if val.IsNil() {
+ val.Set(reflect.MakeSlice(val.Type(), 0, len(elems)))
+ }
+
+ val.SetLen(0)
+ val.Set(reflect.Append(val, elems...))
+
+ return nil
+}
+
+// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) {
+ return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+ }
+
+ // If BSON value is null and the go value is a pointer, then don't call
+ // UnmarshalBSONValue. Even if the Go pointer is already initialized (i.e.,
+ // non-nil), encountering null in BSON will result in the pointer being
+ // directly set to nil here. Since the pointer is being replaced with nil,
+ // there is no opportunity (or reason) for the custom UnmarshalBSONValue logic
+ // to be called.
+ if vr.Type() == bsontype.Null && val.Kind() == reflect.Ptr {
+ val.Set(reflect.Zero(val.Type()))
+
+ return vr.ReadNull()
+ }
+
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ if !val.CanSet() {
+ return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+ }
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+
+ if !val.Type().Implements(tValueUnmarshaler) {
+ if !val.CanAddr() {
+ return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+ }
+ val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
+ }
+
+ t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+ if err != nil {
+ return err
+ }
+
+ m, ok := val.Interface().(ValueUnmarshaler)
+ if !ok {
+ // NB: this error should be unreachable due to the above checks
+ return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+ }
+ return m.UnmarshalBSONValue(t, src)
+}
+
+// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) {
+ return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+ }
+
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ if !val.CanSet() {
+ return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+ }
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+
+ _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+ if err != nil {
+ return err
+ }
+
+ // If the target Go value is a pointer and the BSON field value is empty, set the value to the
+ // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to
+ // change the pointer value from within the function (only the value at the pointer address),
+ // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON
+ // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches
+ // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and
+ // the JSON field value is "null".
+ if val.Kind() == reflect.Ptr && len(src) == 0 {
+ val.Set(reflect.Zero(val.Type()))
+ return nil
+ }
+
+ if !val.Type().Implements(tUnmarshaler) {
+ if !val.CanAddr() {
+ return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+ }
+ val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
+ }
+
+ m, ok := val.Interface().(Unmarshaler)
+ if !ok {
+ // NB: this error should be unreachable due to the above checks
+ return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+ }
+ return m.UnmarshalBSON(src)
+}
+
+// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}.
+//
+// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tEmpty {
+ return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+ }
+
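+	// Resolve the concrete Go type for this BSON type from the registry's
+	// type map. When no mapping exists, embedded documents fall back to the
+	// ancestor type (if set) or primitive.D, and BSON null decodes to nil.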
+ rtype, err := dc.LookupTypeMapEntry(vr.Type())
+ if err != nil {
+ switch vr.Type() {
+ case bsontype.EmbeddedDocument:
+ if dc.Ancestor != nil {
+ rtype = dc.Ancestor
+ break
+ }
+ rtype = tD
+ case bsontype.Null:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadNull()
+ default:
+ return err
+ }
+ }
+
+ decoder, err := dc.LookupDecoder(rtype)
+ if err != nil {
+ return err
+ }
+
+ elem := reflect.New(rtype).Elem()
+ err = decoder.DecodeValue(dc, vr, elem)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tCoreDocument {
+ return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
+ }
+
+ if val.IsNil() {
+ val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+ }
+
+ val.SetLen(0)
+
+ cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr)
+ val.Set(reflect.ValueOf(cdoc))
+ return err
+}
+
+func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) {
+ elems := make([]reflect.Value, 0)
+
+ ar, err := vr.ReadArray()
+ if err != nil {
+ return nil, err
+ }
+
+ eType := val.Type().Elem()
+
+ decoder, err := dc.LookupDecoder(eType)
+ if err != nil {
+ return nil, err
+ }
+ eTypeDecoder, _ := decoder.(typeDecoder)
+
+ idx := 0
+ for {
+ vr, err := ar.ReadValue()
+ if errors.Is(err, bsonrw.ErrEOA) {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true)
+ if err != nil {
+ return nil, newDecodeError(strconv.Itoa(idx), err)
+ }
+ elems = append(elems, elem)
+ idx++
+ }
+
+ return elems, nil
+}
+
+func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) {
+ var cws primitive.CodeWithScope
+
+ code, dr, err := vr.ReadCodeWithScope()
+ if err != nil {
+ return cws, err
+ }
+
+ scope := reflect.New(tD).Elem()
+ elems, err := dvd.decodeElemsFromDocumentReader(dc, dr)
+ if err != nil {
+ return cws, err
+ }
+
+ scope.Set(reflect.MakeSlice(tD, 0, len(elems)))
+ scope.Set(reflect.Append(scope, elems...))
+
+ cws = primitive.CodeWithScope{
+ Code: primitive.JavaScript(code),
+ Scope: scope.Interface().(primitive.D),
+ }
+ return cws, nil
+}
+
+func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tCodeWithScope {
+ return emptyValue, ValueDecoderError{
+ Name: "CodeWithScopeDecodeValue",
+ Types: []reflect.Type{tCodeWithScope},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var cws primitive.CodeWithScope
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.CodeWithScope:
+ cws, err = dvd.readCodeWithScope(dc, vr)
+ case bsontype.Null:
+ err = vr.ReadNull()
+ case bsontype.Undefined:
+ err = vr.ReadUndefined()
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType)
+ }
+ if err != nil {
+ return emptyValue, err
+ }
+
+ return reflect.ValueOf(cws), nil
+}
+
+// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tCodeWithScope {
+ return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+ }
+
+ elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) {
+ switch vr.Type() {
+ case bsontype.Type(0), bsontype.EmbeddedDocument:
+ default:
+ return nil, fmt.Errorf("cannot decode %v into a D", vr.Type())
+ }
+
+ dr, err := vr.ReadDocument()
+ if err != nil {
+ return nil, err
+ }
+
+ return dvd.decodeElemsFromDocumentReader(dc, dr)
+}
+
+func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) {
+ decoder, err := dc.LookupDecoder(tEmpty)
+ if err != nil {
+ return nil, err
+ }
+
+ elems := make([]reflect.Value, 0)
+ for {
+ key, vr, err := dr.ReadElement()
+ if errors.Is(err, bsonrw.ErrEOD) {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ val := reflect.New(tEmpty).Elem()
+ err = decoder.DecodeValue(dc, vr, val)
+ if err != nil {
+ return nil, newDecodeError(key, err)
+ }
+
+ elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()}))
+ }
+
+ return elems, nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
new file mode 100644
index 00000000000..4751ae995e7
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
@@ -0,0 +1,856 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "sync"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+ "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var defaultValueEncoders DefaultValueEncoders
+
+var bvwPool = bsonrw.NewBSONValueWriterPool()
+
+var errInvalidValue = errors.New("cannot encode invalid element")
+
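+// sliceWriterPool recycles SliceWriter buffers across encode calls to reduce
+// allocations.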
+var sliceWriterPool = sync.Pool{
+ New: func() interface{} {
+ sw := make(bsonrw.SliceWriter, 0)
+ return &sw
+ },
+}
+
+func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error {
+ vw, err := dw.WriteDocumentElement(e.Key)
+ if err != nil {
+ return err
+ }
+
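+	// A nil interface value carries no type to look up, so encode it as BSON
+	// null.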
+ if e.Value == nil {
+ return vw.WriteNull()
+ }
+ encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value))
+ if err != nil {
+ return err
+ }
+
+ err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value))
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// DefaultValueEncoders is a namespace type for the default ValueEncoders used
+// when creating a registry.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+type DefaultValueEncoders struct{}
+
+// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with
+// the provided RegistryBuilder.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
+ if rb == nil {
+ panic(errors.New("argument to RegisterDefaultEncoders must not be nil"))
+ }
+ rb.
+ RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec).
+ RegisterTypeEncoder(tTime, defaultTimeCodec).
+ RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec).
+ RegisterTypeEncoder(tCoreArray, defaultArrayCodec).
+ RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)).
+ RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)).
+ RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)).
+ RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)).
+ RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)).
+ RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)).
+ RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)).
+ RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)).
+ RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)).
+ RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)).
+ RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)).
+ RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)).
+ RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)).
+ RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)).
+ RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)).
+ RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)).
+ RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)).
+ RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)).
+ RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)).
+ RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)).
+ RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)).
+ RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)).
+ RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)).
+ RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec).
+ RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec).
+ RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec).
+ RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec).
+ RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec).
+ RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)).
+ RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)).
+ RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)).
+ RegisterDefaultEncoder(reflect.Map, defaultMapCodec).
+ RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec).
+ RegisterDefaultEncoder(reflect.String, defaultStringCodec).
+ RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()).
+ RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()).
+ RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)).
+ RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)).
+ RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue))
+}
+
+// BooleanEncodeValue is the ValueEncoderFunc for bool types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Kind() != reflect.Bool {
+ return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+ }
+ return vw.WriteBoolean(val.Bool())
+}
+
+func fitsIn32Bits(i int64) bool {
+ return math.MinInt32 <= i && i <= math.MaxInt32
+}
+
+// IntEncodeValue is the ValueEncoderFunc for int types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
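+//
+// int8, int16, and int32 values are always written as BSON int32. int values
+// are written as BSON int32 when they fit in 32 bits and as BSON int64
+// otherwise; int64 values are written as BSON int32 only when MinSize is set
+// and the value fits.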
+func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ switch val.Kind() {
+ case reflect.Int8, reflect.Int16, reflect.Int32:
+ return vw.WriteInt32(int32(val.Int()))
+ case reflect.Int:
+ i64 := val.Int()
+ if fitsIn32Bits(i64) {
+ return vw.WriteInt32(int32(i64))
+ }
+ return vw.WriteInt64(i64)
+ case reflect.Int64:
+ i64 := val.Int()
+ if ec.MinSize && fitsIn32Bits(i64) {
+ return vw.WriteInt32(int32(i64))
+ }
+ return vw.WriteInt64(i64)
+ }
+
+ return ValueEncoderError{
+ Name: "IntEncodeValue",
+ Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+ Received: val,
+ }
+}
+
+// UintEncodeValue is the ValueEncoderFunc for uint types.
+//
+// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead.
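+//
+// uint8 and uint16 values are written as BSON int32. uint, uint32, and uint64
+// values are written as BSON int32 when MinSize is set and the value fits, as
+// BSON int64 when the value fits in an int64, and fail otherwise.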
+func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ switch val.Kind() {
+ case reflect.Uint8, reflect.Uint16:
+ return vw.WriteInt32(int32(val.Uint()))
+ case reflect.Uint, reflect.Uint32, reflect.Uint64:
+ u64 := val.Uint()
+ if ec.MinSize && u64 <= math.MaxInt32 {
+ return vw.WriteInt32(int32(u64))
+ }
+ if u64 > math.MaxInt64 {
+ return fmt.Errorf("%d overflows int64", u64)
+ }
+ return vw.WriteInt64(int64(u64))
+ }
+
+ return ValueEncoderError{
+ Name: "UintEncodeValue",
+ Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+ Received: val,
+ }
+}
+
+// FloatEncodeValue is the ValueEncoderFunc for float types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ switch val.Kind() {
+ case reflect.Float32, reflect.Float64:
+ return vw.WriteDouble(val.Float())
+ }
+
+ return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
+}
+
+// StringEncodeValue is the ValueEncoderFunc for string types.
+//
+// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if val.Kind() != reflect.String {
+ return ValueEncoderError{
+ Name: "StringEncodeValue",
+ Kinds: []reflect.Kind{reflect.String},
+ Received: val,
+ }
+ }
+
+ return vw.WriteString(val.String())
+}
+
+// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tOID {
+ return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val}
+ }
+ return vw.WriteObjectID(val.Interface().(primitive.ObjectID))
+}
+
+// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tDecimal {
+ return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+ }
+ return vw.WriteDecimal128(val.Interface().(primitive.Decimal128))
+}
+
+// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tJSONNumber {
+ return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+ }
+ jsnum := val.Interface().(json.Number)
+
+ // Attempt int first, then float64
+ if i64, err := jsnum.Int64(); err == nil {
+ return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64))
+ }
+
+ f64, err := jsnum.Float64()
+ if err != nil {
+ return err
+ }
+
+ return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
+}
+
+// URLEncodeValue is the ValueEncoderFunc for url.URL.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tURL {
+ return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
+ }
+ u := val.Interface().(url.URL)
+ return vw.WriteString(u.String())
+}
+
+// TimeEncodeValue is the ValueEncoderFunc for time.Time.
+//
+// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tTime {
+ return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+ }
+ tt := val.Interface().(time.Time)
+ dt := primitive.NewDateTimeFromTime(tt)
+ return vw.WriteDateTime(int64(dt))
+}
+
+// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
+//
+// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tByteSlice {
+ return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+ }
+ if val.IsNil() {
+ return vw.WriteNull()
+ }
+ return vw.WriteBinary(val.Interface().([]byte))
+}
+
+// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
+//
+// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+ return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+ }
+
+ if val.IsNil() {
+ // If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+ // to a TopLevel document. We can't currently tell if this is what actually happened, but if
+ // there's a deeper underlying problem, the error will also be returned from WriteDocument,
+ // so just continue. The operations on a map reflection value are valid, so we can call
+ // MapKeys within mapEncodeValue without a problem.
+ err := vw.WriteNull()
+ if err == nil {
+ return nil
+ }
+ }
+
+ dw, err := vw.WriteDocument()
+ if err != nil {
+ return err
+ }
+
+ return dve.mapEncodeValue(ec, dw, val, nil)
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists; it is mainly used for inline maps in the
+// struct codec.
+func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+ elemType := val.Type().Elem()
+ encoder, err := ec.LookupEncoder(elemType)
+ if err != nil && elemType.Kind() != reflect.Interface {
+ return err
+ }
+
+ keys := val.MapKeys()
+ for _, key := range keys {
+ if collisionFn != nil && collisionFn(key.String()) {
+ return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+ }
+
+ currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key))
+ if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+ return lookupErr
+ }
+
+ vw, err := dw.WriteDocumentElement(key.String())
+ if err != nil {
+ return err
+ }
+
+ if errors.Is(lookupErr, errInvalidValue) {
+ err = vw.WriteNull()
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ err = currEncoder.EncodeValue(ec, vw, currVal)
+ if err != nil {
+ return err
+ }
+ }
+
+ return dw.WriteDocumentEnd()
+}
+
+// ArrayEncodeValue is the ValueEncoderFunc for array types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Kind() != reflect.Array {
+ return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+ }
+
+ // If we have an array of primitive.E, we want to treat it as a document instead of as an array.
+ if val.Type().Elem() == tE {
+ dw, err := vw.WriteDocument()
+ if err != nil {
+ return err
+ }
+
+ for idx := 0; idx < val.Len(); idx++ {
+ e := val.Index(idx).Interface().(primitive.E)
+ err = encodeElement(ec, dw, e)
+ if err != nil {
+ return err
+ }
+ }
+
+ return dw.WriteDocumentEnd()
+ }
+
+ // If we have an array of bytes, we want to treat it as binary data instead of as an array.
+ if val.Type().Elem() == tByte {
+ var byteSlice []byte
+ for idx := 0; idx < val.Len(); idx++ {
+ byteSlice = append(byteSlice, val.Index(idx).Interface().(byte))
+ }
+ return vw.WriteBinary(byteSlice)
+ }
+
+ aw, err := vw.WriteArray()
+ if err != nil {
+ return err
+ }
+
+ elemType := val.Type().Elem()
+ encoder, err := ec.LookupEncoder(elemType)
+ if err != nil && elemType.Kind() != reflect.Interface {
+ return err
+ }
+
+ for idx := 0; idx < val.Len(); idx++ {
+ currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx))
+ if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+ return lookupErr
+ }
+
+ vw, err := aw.WriteArrayElement()
+ if err != nil {
+ return err
+ }
+
+ if errors.Is(lookupErr, errInvalidValue) {
+ err = vw.WriteNull()
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ err = currEncoder.EncodeValue(ec, vw, currVal)
+ if err != nil {
+ return err
+ }
+ }
+ return aw.WriteArrayEnd()
+}
+
+// SliceEncodeValue is the ValueEncoderFunc for slice types.
+//
+// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Kind() != reflect.Slice {
+ return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+ }
+
+ if val.IsNil() {
+ return vw.WriteNull()
+ }
+
+ // If the slice is convertible to primitive.D, treat it as a document instead of as an array.
+ if val.Type().ConvertibleTo(tD) {
+ d := val.Convert(tD).Interface().(primitive.D)
+
+ dw, err := vw.WriteDocument()
+ if err != nil {
+ return err
+ }
+
+ for _, e := range d {
+ err = encodeElement(ec, dw, e)
+ if err != nil {
+ return err
+ }
+ }
+
+ return dw.WriteDocumentEnd()
+ }
+
+ aw, err := vw.WriteArray()
+ if err != nil {
+ return err
+ }
+
+ elemType := val.Type().Elem()
+ encoder, err := ec.LookupEncoder(elemType)
+ if err != nil && elemType.Kind() != reflect.Interface {
+ return err
+ }
+
+ for idx := 0; idx < val.Len(); idx++ {
+ currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx))
+ if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+ return lookupErr
+ }
+
+ vw, err := aw.WriteArrayElement()
+ if err != nil {
+ return err
+ }
+
+ if errors.Is(lookupErr, errInvalidValue) {
+ err = vw.WriteNull()
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ err = currEncoder.EncodeValue(ec, vw, currVal)
+ if err != nil {
+ return err
+ }
+ }
+ return aw.WriteArrayEnd()
+}
+
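+// lookupElementEncoder returns the encoder to use for a single element. A
+// non-nil origEncoder (or a non-interface element) is returned unchanged;
+// otherwise the interface value is unwrapped and an encoder is looked up for
+// its dynamic type. errInvalidValue is returned for nil interface values so
+// callers can write BSON null instead.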
+func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) {
+ if origEncoder != nil || (currVal.Kind() != reflect.Interface) {
+ return origEncoder, currVal, nil
+ }
+ currVal = currVal.Elem()
+ if !currVal.IsValid() {
+ return nil, currVal, errInvalidValue
+ }
+ currEncoder, err := ec.LookupEncoder(currVal.Type())
+
+ return currEncoder, currVal, err
+}
+
+// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}.
+//
+// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tEmpty {
+ return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+ }
+
+ if val.IsNil() {
+ return vw.WriteNull()
+ }
+ encoder, err := ec.LookupEncoder(val.Elem().Type())
+ if err != nil {
+ return err
+ }
+
+ return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ // Either val or a pointer to val must implement ValueMarshaler
+ switch {
+ case !val.IsValid():
+ return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
+ case val.Type().Implements(tValueMarshaler):
+ // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer
+ if isImplementationNil(val, tValueMarshaler) {
+ return vw.WriteNull()
+ }
+ case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr():
+ val = val.Addr()
+ default:
+ return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
+ }
+
+ m, ok := val.Interface().(ValueMarshaler)
+ if !ok {
+ return vw.WriteNull()
+ }
+ t, data, err := m.MarshalBSONValue()
+ if err != nil {
+ return err
+ }
+ return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data)
+}
+
+// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ // Either val or a pointer to val must implement Marshaler
+ switch {
+ case !val.IsValid():
+ return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
+ case val.Type().Implements(tMarshaler):
+ // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer
+ if isImplementationNil(val, tMarshaler) {
+ return vw.WriteNull()
+ }
+ case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr():
+ val = val.Addr()
+ default:
+ return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
+ }
+
+ m, ok := val.Interface().(Marshaler)
+ if !ok {
+ return vw.WriteNull()
+ }
+ data, err := m.MarshalBSON()
+ if err != nil {
+ return err
+ }
+ return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data)
+}
+
+// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ // Either val or a pointer to val must implement Proxy
+ switch {
+ case !val.IsValid():
+ return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
+ case val.Type().Implements(tProxy):
+ // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer
+ if isImplementationNil(val, tProxy) {
+ return vw.WriteNull()
+ }
+ case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr():
+ val = val.Addr()
+ default:
+ return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
+ }
+
+ m, ok := val.Interface().(Proxy)
+ if !ok {
+ return vw.WriteNull()
+ }
+ v, err := m.ProxyBSON()
+ if err != nil {
+ return err
+ }
+ if v == nil {
+ encoder, err := ec.LookupEncoder(nil)
+ if err != nil {
+ return err
+ }
+ return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil))
+ }
+ vv := reflect.ValueOf(v)
+ switch vv.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ vv = vv.Elem()
+ }
+ encoder, err := ec.LookupEncoder(vv.Type())
+ if err != nil {
+ return err
+ }
+ return encoder.EncodeValue(ec, vw, vv)
+}
+
+// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tJavaScript {
+ return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
+ }
+
+ return vw.WriteJavascript(val.String())
+}
+
+// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tSymbol {
+ return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val}
+ }
+
+ return vw.WriteSymbol(val.String())
+}
+
+// BinaryEncodeValue is the ValueEncoderFunc for Binary.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tBinary {
+ return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val}
+ }
+ b := val.Interface().(primitive.Binary)
+
+ return vw.WriteBinaryWithSubtype(b.Data, b.Subtype)
+}
+
+// UndefinedEncodeValue is the ValueEncoderFunc for Undefined.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tUndefined {
+ return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val}
+ }
+
+ return vw.WriteUndefined()
+}
+
+// DateTimeEncodeValue is the ValueEncoderFunc for DateTime.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tDateTime {
+ return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val}
+ }
+
+ return vw.WriteDateTime(val.Int())
+}
+
+// NullEncodeValue is the ValueEncoderFunc for Null.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tNull {
+ return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val}
+ }
+
+ return vw.WriteNull()
+}
+
+// RegexEncodeValue is the ValueEncoderFunc for Regex.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tRegex {
+ return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val}
+ }
+
+ regex := val.Interface().(primitive.Regex)
+
+ return vw.WriteRegex(regex.Pattern, regex.Options)
+}
+
+// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tDBPointer {
+ return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
+ }
+
+ dbp := val.Interface().(primitive.DBPointer)
+
+ return vw.WriteDBPointer(dbp.DB, dbp.Pointer)
+}
+
+// TimestampEncodeValue is the ValueEncoderFunc for Timestamp.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tTimestamp {
+ return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
+ }
+
+ ts := val.Interface().(primitive.Timestamp)
+
+ return vw.WriteTimestamp(ts.T, ts.I)
+}
+
+// MinKeyEncodeValue is the ValueEncoderFunc for MinKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tMinKey {
+ return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val}
+ }
+
+ return vw.WriteMinKey()
+}
+
+// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tMaxKey {
+ return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
+ }
+
+ return vw.WriteMaxKey()
+}
+
+// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tCoreDocument {
+ return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
+ }
+
+ cdoc := val.Interface().(bsoncore.Document)
+
+ return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc)
+}
+
+// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tCodeWithScope {
+ return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+ }
+
+ cws := val.Interface().(primitive.CodeWithScope)
+
+ dw, err := vw.WriteCodeWithScope(string(cws.Code))
+ if err != nil {
+ return err
+ }
+
+ sw := sliceWriterPool.Get().(*bsonrw.SliceWriter)
+ defer sliceWriterPool.Put(sw)
+ *sw = (*sw)[:0]
+
+ scopeVW := bvwPool.Get(sw)
+ defer bvwPool.Put(scopeVW)
+
+ encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope))
+ if err != nil {
+ return err
+ }
+
+ err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope))
+ if err != nil {
+ return err
+ }
+
+ err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw)
+ if err != nil {
+ return err
+ }
+ return dw.WriteDocumentEnd()
+}
+
+// isImplementationNil reports whether val is a nil pointer whose pointed-to concrete type implements inter.
+func isImplementationNil(val reflect.Value, inter reflect.Type) bool {
+ vt := val.Type()
+ for vt.Kind() == reflect.Ptr {
+ vt = vt.Elem()
+ }
+ return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
new file mode 100644
index 00000000000..4613e5a1ec7
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
@@ -0,0 +1,95 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsoncodec provides a system for encoding values to BSON representations and decoding
+// values from BSON representations. This package considers both binary BSON and ExtendedJSON as
+// BSON representations. The types in this package enable a flexible system for handling this
+// encoding and decoding.
+//
+// The codec system is composed of two parts:
+//
+// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON
+// representations.
+//
+// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for
+// retrieving them.
+//
+// # ValueEncoders and ValueDecoders
+//
+// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON.
+// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the
+// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc
+// is provided to allow use of a function with the correct signature as a ValueEncoder. An
+// EncodeContext instance is provided to allow implementations to look up further ValueEncoders and
+// to provide configuration information.
+//
+// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that
+// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to
+// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
+// instance is provided and serves similar functionality to the EncodeContext.
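+//
+// As an illustrative sketch (not part of this package), a function with the
+// matching signature can serve as a ValueEncoder via ValueEncoderFunc; this
+// hypothetical encoder writes Go bool values as BSON int32:
+//
+//	var boolAsInt32 = bsoncodec.ValueEncoderFunc(func(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+//		if !val.IsValid() || val.Kind() != reflect.Bool {
+//			return bsoncodec.ValueEncoderError{Name: "BoolAsInt32", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+//		}
+//		if val.Bool() {
+//			return vw.WriteInt32(1)
+//		}
+//		return vw.WriteInt32(0)
+//	})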
+//
+// # Registry
+//
+// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type
+// documentation for examples of registering various custom encoders and decoders. A Registry can
+// have four main types of codecs:
+//
+// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and
+// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value
+// whose type matches the registered type exactly.
+// If the registered type is an interface, the codec will be invoked when encoding or decoding
+// values whose type is the interface, but not for values with concrete types that implement the
+// interface.
+//
+// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and
+// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs
+// will be invoked when encoding or decoding values whose types implement the interface. An example
+// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method
+// for any value whose type implements bson.Marshaler, regardless of the value's concrete type.
+//
+// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type
+// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}.
+// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances,
+// respectively, when decoding into a bson.D. The following code would change the behavior so these
+// values decode as Go int instances instead:
+//
+// intType := reflect.TypeOf(int(0))
+// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
+//
+// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and
+// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding
+// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't
+// match a registered type or hook encoder/decoder first. These methods should be used to change the
+// behavior for all values for a specific kind.
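+//
+// For example (an illustrative sketch; myFloatEncoder is a hypothetical
+// ValueEncoder), registering a kind encoder for reflect.Float64 changes how
+// float64 values and named types with a float64 kind are encoded:
+//
+//	reg := bsoncodec.NewRegistry()
+//	reg.RegisterKindEncoder(reflect.Float64, myFloatEncoder)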
+//
+// # Registry Lookup Procedure
+//
+// When looking up an encoder in a Registry, the precedence rules are as follows:
+//
+// 1. A type encoder registered for the exact type of the value.
+//
+// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to
+// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and
+// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries
+// constructed using bson.NewRegistry have driver-defined hooks registered for the
+// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take
+// precedence over any new hooks.
+//
+// 3. A kind encoder registered for the value's kind.
+//
+// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The
+// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder
+// will be returned if no decoder is found.
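+//
+// As an illustrative sketch (myStructEncoder and myTimeEncoder are
+// hypothetical ValueEncoders), a type encoder takes precedence over a kind
+// encoder for the same value:
+//
+//	reg := bsoncodec.NewRegistry()
+//	reg.RegisterKindEncoder(reflect.Struct, myStructEncoder)
+//	reg.RegisterTypeEncoder(reflect.TypeOf(time.Time{}), myTimeEncoder)
+//	enc, _ := reg.LookupEncoder(reflect.TypeOf(time.Time{})) // resolves to myTimeEncoder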
+//
+// # DefaultValueEncoders and DefaultValueDecoders
+//
+// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
+// ValueDecoders for handling a wide range of Go types, including all of the types within the
+// primitive package. To make registering these codecs easier, a helper method on each type is
+// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for
+// the DefaultValueDecoders type the method is called RegisterDefaultDecoders; this method also
+// handles registering type map entries for each BSON type.
+package bsoncodec
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
new file mode 100644
index 00000000000..098368f0711
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
@@ -0,0 +1,173 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "reflect"
+
+ "go.mongodb.org/mongo-driver/bson/bsonoptions"
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// EmptyInterfaceCodec is the Codec used for interface{} values.
+//
+// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go
+// Driver 2.0. To configure the empty interface encode and decode behavior, use
+// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface
+// encode and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to unmarshal BSON binary field
+// values as a Go byte slice, use:
+//
+// opt := options.Client().SetBSONOptions(&options.BSONOptions{
+// BinaryAsSlice: true,
+// })
+//
+// See the deprecation notice for each field in EmptyInterfaceCodec for the
+// corresponding settings.
+type EmptyInterfaceCodec struct {
+ // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the
+ // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
+ //
+ // Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead.
+ DecodeBinaryAsSlice bool
+}
+
+var (
+ defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
+
+ // Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it
+ // to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a
+ // collection.
+ _ typeDecoder = defaultEmptyInterfaceCodec
+)
+
+// NewEmptyInterfaceCodec returns an EmptyInterfaceCodec with options opts.
+//
+// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See
+// [EmptyInterfaceCodec] for more details.
+func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
+ interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
+
+ codec := EmptyInterfaceCodec{}
+ if interfaceOpt.DecodeBinaryAsSlice != nil {
+ codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice
+ }
+ return &codec
+}
+
+// EncodeValue is the ValueEncoderFunc for interface{}.
+func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tEmpty {
+ return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+ }
+
+ if val.IsNil() {
+ return vw.WriteNull()
+ }
+ encoder, err := ec.LookupEncoder(val.Elem().Type())
+ if err != nil {
+ return err
+ }
+
+ return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) {
+ isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument
+ if isDocument {
+ if dc.defaultDocumentType != nil {
+ // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return
+ // that type.
+ return dc.defaultDocumentType, nil
+ }
+ if dc.Ancestor != nil {
+ // Using ancestor information rather than looking up the type map entry forces consistent decoding.
+ // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry
+ // has been registered.
+ return dc.Ancestor, nil
+ }
+ }
+
+ rtype, err := dc.LookupTypeMapEntry(valueType)
+ if err == nil {
+ return rtype, nil
+ }
+
+ if isDocument {
+ // For documents, fall back to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument,
+ // depending on the original valueType.
+ var lookupType bsontype.Type
+ switch valueType {
+ case bsontype.Type(0):
+ lookupType = bsontype.EmbeddedDocument
+ case bsontype.EmbeddedDocument:
+ lookupType = bsontype.Type(0)
+ }
+
+ rtype, err = dc.LookupTypeMapEntry(lookupType)
+ if err == nil {
+ return rtype, nil
+ }
+ }
+
+ return nil, err
+}
+
+func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tEmpty {
+ return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)}
+ }
+
+ rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type())
+ if err != nil {
+ switch vr.Type() {
+ case bsontype.Null:
+ return reflect.Zero(t), vr.ReadNull()
+ default:
+ return emptyValue, err
+ }
+ }
+
+ decoder, err := dc.LookupDecoder(rtype)
+ if err != nil {
+ return emptyValue, err
+ }
+
+ elem, err := decodeTypeOrValue(decoder, dc, vr, rtype)
+ if err != nil {
+ return emptyValue, err
+ }
+
+ if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary {
+ binElem := elem.Interface().(primitive.Binary)
+ if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld {
+ elem = reflect.ValueOf(binElem.Data)
+ }
+ }
+
+ return elem, nil
+}
+
+// DecodeValue is the ValueDecoderFunc for interface{}.
+func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tEmpty {
+ return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+ }
+
+ elem, err := eic.decodeType(dc, vr, val.Type())
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
new file mode 100644
index 00000000000..d7e00ffa8d1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
@@ -0,0 +1,343 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "go.mongodb.org/mongo-driver/bson/bsonoptions"
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var defaultMapCodec = NewMapCodec()
+
+// MapCodec is the Codec used for map values.
+//
+// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To
+// configure the map encode and decode behavior, use the configuration methods
+// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and
+// decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON
+// documents, use:
+//
+// opt := options.Client().SetBSONOptions(&options.BSONOptions{
+// NilMapAsEmpty: true,
+// })
+//
+// See the deprecation notice for each field in MapCodec for the corresponding
+// settings.
+type MapCodec struct {
+ // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination
+ // value passed to Decode before unmarshaling BSON documents into them.
+ //
+ // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead.
+ DecodeZerosMap bool
+
+ // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of
+ // BSON null.
+ //
+ // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead.
+ EncodeNilAsEmpty bool
+
+ // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name
+ // strings using fmt.Sprintf() instead of the default string conversion logic.
+ //
+ // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or
+ // options.BSONOptions.StringifyMapKeysWithFmt instead.
+ EncodeKeysWithStringer bool
+}
+
+// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key.
+// This applies to types used as map keys and is similar to encoding.TextMarshaler.
+type KeyMarshaler interface {
+ MarshalKey() (key string, err error)
+}
+
+// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation
+// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler.
+//
+// UnmarshalKey must be able to decode the form generated by MarshalKey.
+// UnmarshalKey must copy the text if it wishes to retain the text
+// after returning.
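+//
+// As an illustrative sketch (ZipCode is a hypothetical type, not part of this
+// package), a map key type can implement both KeyMarshaler and KeyUnmarshaler:
+//
+//	type ZipCode int
+//
+//	func (z ZipCode) MarshalKey() (string, error) {
+//		return fmt.Sprintf("%05d", int(z)), nil
+//	}
+//
+//	func (z *ZipCode) UnmarshalKey(key string) error {
+//		n, err := strconv.Atoi(key)
+//		if err != nil {
+//			return err
+//		}
+//		*z = ZipCode(n)
+//		return nil
+//	}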
+type KeyUnmarshaler interface {
+ UnmarshalKey(key string) error
+}
+
+// NewMapCodec returns a MapCodec with options opts.
+//
+// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See
+// [MapCodec] for more details.
+func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
+ mapOpt := bsonoptions.MergeMapCodecOptions(opts...)
+
+ codec := MapCodec{}
+ if mapOpt.DecodeZerosMap != nil {
+ codec.DecodeZerosMap = *mapOpt.DecodeZerosMap
+ }
+ if mapOpt.EncodeNilAsEmpty != nil {
+ codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty
+ }
+ if mapOpt.EncodeKeysWithStringer != nil {
+ codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer
+ }
+ return &codec
+}
+
+// EncodeValue is the ValueEncoder for map[*]* types.
+func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Kind() != reflect.Map {
+ return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+ }
+
+ if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty {
+ // If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+ // to a TopLevel document. We can't currently tell if this is what actually happened, but if
+ // there's a deeper underlying problem, the error will also be returned from WriteDocument,
+ // so just continue. The operations on a map reflection value are valid, so we can call
+ // MapKeys within mapEncodeValue without a problem.
+ err := vw.WriteNull()
+ if err == nil {
+ return nil
+ }
+ }
+
+ dw, err := vw.WriteDocument()
+ if err != nil {
+ return err
+ }
+
+ return mc.mapEncodeValue(ec, dw, val, nil)
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists; it is mainly used for inline maps in the
+// struct codec.
+func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+ elemType := val.Type().Elem()
+ encoder, err := ec.LookupEncoder(elemType)
+ if err != nil && elemType.Kind() != reflect.Interface {
+ return err
+ }
+
+ keys := val.MapKeys()
+ for _, key := range keys {
+ keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt)
+ if err != nil {
+ return err
+ }
+
+ if collisionFn != nil && collisionFn(keyStr) {
+ return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+ }
+
+ currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key))
+ if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+ return lookupErr
+ }
+
+ vw, err := dw.WriteDocumentElement(keyStr)
+ if err != nil {
+ return err
+ }
+
+ if errors.Is(lookupErr, errInvalidValue) {
+ err = vw.WriteNull()
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ err = currEncoder.EncodeValue(ec, vw, currVal)
+ if err != nil {
+ return err
+ }
+ }
+
+ return dw.WriteDocumentEnd()
+}
+
+// DecodeValue is the ValueDecoder for map types. Supported key types are strings, integers,
+// types implementing KeyUnmarshaler or encoding.TextUnmarshaler, and (when
+// EncodeKeysWithStringer is set) floats.
+func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) {
+ return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+ }
+
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Type(0), bsontype.EmbeddedDocument:
+ case bsontype.Null:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadNull()
+ case bsontype.Undefined:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadUndefined()
+ default:
+ return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
+ }
+
+ dr, err := vr.ReadDocument()
+ if err != nil {
+ return err
+ }
+
+ if val.IsNil() {
+ val.Set(reflect.MakeMap(val.Type()))
+ }
+
+ if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) {
+ clearMap(val)
+ }
+
+ eType := val.Type().Elem()
+ decoder, err := dc.LookupDecoder(eType)
+ if err != nil {
+ return err
+ }
+ eTypeDecoder, _ := decoder.(typeDecoder)
+
+ if eType == tEmpty {
+ dc.Ancestor = val.Type()
+ }
+
+ keyType := val.Type().Key()
+
+ for {
+ key, vr, err := dr.ReadElement()
+ if errors.Is(err, bsonrw.ErrEOD) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ k, err := mc.decodeKey(key, keyType)
+ if err != nil {
+ return err
+ }
+
+ elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true)
+ if err != nil {
+ return newDecodeError(key, err)
+ }
+
+ val.SetMapIndex(k, elem)
+ }
+ return nil
+}
+
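+// clearMap deletes every entry in m by setting each key to the zero reflect.Value.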
+func clearMap(m reflect.Value) {
+ var none reflect.Value
+ for _, k := range m.MapKeys() {
+ m.SetMapIndex(k, none)
+ }
+}
+
+func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) {
+ if mc.EncodeKeysWithStringer || encodeKeysWithStringer {
+ return fmt.Sprint(val), nil
+ }
+
+ // keys of any string type are used directly
+ if val.Kind() == reflect.String {
+ return val.String(), nil
+ }
+ // KeyMarshalers are marshaled
+ if km, ok := val.Interface().(KeyMarshaler); ok {
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return "", nil
+ }
+ buf, err := km.MarshalKey()
+ if err == nil {
+ return buf, nil
+ }
+ return "", err
+ }
+ // keys that implement encoding.TextMarshaler are marshaled.
+ if km, ok := val.Interface().(encoding.TextMarshaler); ok {
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return "", nil
+ }
+
+ buf, err := km.MarshalText()
+ if err != nil {
+ return "", err
+ }
+
+ return string(buf), nil
+ }
+
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(val.Int(), 10), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(val.Uint(), 10), nil
+ }
+ return "", fmt.Errorf("unsupported key type: %v", val.Type())
+}
+
+var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) {
+ keyVal := reflect.ValueOf(key)
+ var err error
+ switch {
+ // First, if EncodeKeysWithStringer is not enabled, try to decode with KeyUnmarshaler.
+ case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType):
+ keyVal = reflect.New(keyType)
+ v := keyVal.Interface().(KeyUnmarshaler)
+ err = v.UnmarshalKey(key)
+ keyVal = keyVal.Elem()
+ // Try to decode encoding.TextUnmarshalers.
+ case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
+ keyVal = reflect.New(keyType)
+ v := keyVal.Interface().(encoding.TextUnmarshaler)
+ err = v.UnmarshalText([]byte(key))
+ keyVal = keyVal.Elem()
+ // Otherwise, fall back to type-specific behavior.
+ default:
+ switch keyType.Kind() {
+ case reflect.String:
+ keyVal = reflect.ValueOf(key).Convert(keyType)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, parseErr := strconv.ParseInt(key, 10, 64)
+ if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) {
+ err = fmt.Errorf("failed to unmarshal number key %v", key)
+ }
+ keyVal = reflect.ValueOf(n).Convert(keyType)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, parseErr := strconv.ParseUint(key, 10, 64)
+ if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) {
+ err = fmt.Errorf("failed to unmarshal number key %v", key)
+ break
+ }
+ keyVal = reflect.ValueOf(n).Convert(keyType)
+ case reflect.Float32, reflect.Float64:
+ if mc.EncodeKeysWithStringer {
+ parsed, err := strconv.ParseFloat(key, 64)
+ if err != nil {
+ return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err)
+ }
+ keyVal = reflect.ValueOf(parsed)
+ break
+ }
+ fallthrough
+ default:
+ return keyVal, fmt.Errorf("unsupported key type: %v", keyType)
+ }
+ }
+ return keyVal, err
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
new file mode 100644
index 00000000000..fbd9f0a9e97
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import "fmt"
+
+type mode int
+
+const (
+ _ mode = iota
+ mTopLevel
+ mDocument
+ mArray
+ mValue
+ mElement
+ mCodeWithScope
+ mSpacer
+)
+
+func (m mode) String() string {
+ var str string
+
+ switch m {
+ case mTopLevel:
+ str = "TopLevel"
+ case mDocument:
+ str = "DocumentMode"
+ case mArray:
+ str = "ArrayMode"
+ case mValue:
+ str = "ValueMode"
+ case mElement:
+ str = "ElementMode"
+ case mCodeWithScope:
+ str = "CodeWithScopeMode"
+ case mSpacer:
+ str = "CodeWithScopeSpacerFrame"
+ default:
+ str = "UnknownMode"
+ }
+
+ return str
+}
+
+// TransitionError is an error returned when an invalid transition in a
+// ValueReader or ValueWriter state machine occurs.
+type TransitionError struct {
+ parent mode
+ current mode
+ destination mode
+}
+
+func (te TransitionError) Error() string {
+ if te.destination == mode(0) {
+ return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
+ }
+ if te.parent == mode(0) {
+ return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
+ }
+ return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
new file mode 100644
index 00000000000..ddfa4a33e18
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
@@ -0,0 +1,108 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "reflect"
+
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var _ ValueEncoder = &PointerCodec{}
+var _ ValueDecoder = &PointerCodec{}
+
+// PointerCodec is the Codec used for pointers.
+//
+// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. To
+// override the default pointer encode and decode behavior, create a new registry
+// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new
+// encoder and decoder for pointers.
+//
+// For example,
+//
+// reg := bson.NewRegistry()
+// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder)
+// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder)
+type PointerCodec struct {
+ ecache typeEncoderCache
+ dcache typeDecoderCache
+}
+
+// NewPointerCodec returns a PointerCodec that has been initialized.
+//
+// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See
+// [PointerCodec] for more details.
+func NewPointerCodec() *PointerCodec {
+ return &PointerCodec{}
+}
+
+// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
+// or looking up an encoder for the type of value the pointer points to.
+func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if val.Kind() != reflect.Ptr {
+ if !val.IsValid() {
+ return vw.WriteNull()
+ }
+ return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+ }
+
+ if val.IsNil() {
+ return vw.WriteNull()
+ }
+
+ typ := val.Type()
+ if v, ok := pc.ecache.Load(typ); ok {
+ if v == nil {
+ return ErrNoEncoder{Type: typ}
+ }
+ return v.EncodeValue(ec, vw, val.Elem())
+ }
+ // TODO(charlie): handle concurrent requests for the same type
+ enc, err := ec.LookupEncoder(typ.Elem())
+ enc = pc.ecache.LoadOrStore(typ, enc)
+ if err != nil {
+ return err
+ }
+ return enc.EncodeValue(ec, vw, val.Elem())
+}
+
+// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and
+// using that to decode. If the BSON value is Null, this method will set the pointer to nil.
+func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Kind() != reflect.Ptr {
+ return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+ }
+
+ typ := val.Type()
+ if vr.Type() == bsontype.Null {
+ val.Set(reflect.Zero(typ))
+ return vr.ReadNull()
+ }
+ if vr.Type() == bsontype.Undefined {
+ val.Set(reflect.Zero(typ))
+ return vr.ReadUndefined()
+ }
+
+ if val.IsNil() {
+ val.Set(reflect.New(typ.Elem()))
+ }
+
+ if v, ok := pc.dcache.Load(typ); ok {
+ if v == nil {
+ return ErrNoDecoder{Type: typ}
+ }
+ return v.DecodeValue(dc, vr, val.Elem())
+ }
+ // TODO(charlie): handle concurrent requests for the same type
+ dec, err := dc.LookupDecoder(typ.Elem())
+ dec = pc.dcache.LoadOrStore(typ, dec)
+ if err != nil {
+ return err
+ }
+ return dec.DecodeValue(dc, vr, val.Elem())
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
new file mode 100644
index 00000000000..4cf2b01ab48
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
+// that implement this interface will have ProxyBSON called during the encoding process, and the
+// returned value will be encoded in place of the implementer.
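+//
+// As an illustrative sketch (celsius is a hypothetical type, not part of this
+// package), a wrapper type can delegate its encoding to its underlying value:
+//
+//	type celsius struct{ degrees float64 }
+//
+//	func (c celsius) ProxyBSON() (interface{}, error) {
+//		// Encode the wrapper as a plain BSON double.
+//		return c.degrees, nil
+//	}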
+type Proxy interface {
+ ProxyBSON() (interface{}, error)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
new file mode 100644
index 00000000000..196c491bbbf
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
@@ -0,0 +1,524 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
+//
+// Deprecated: ErrNilType will not be supported in Go Driver 2.0.
+var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
+
+// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
+//
+// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0.
+var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
+
+// ErrNoEncoder is returned when there wasn't an encoder available for a type.
+//
+// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0.
+type ErrNoEncoder struct {
+ Type reflect.Type
+}
+
+func (ene ErrNoEncoder) Error() string {
+ if ene.Type == nil {
+ return "no encoder found for "
+ }
+ return "no encoder found for " + ene.Type.String()
+}
+
+// ErrNoDecoder is returned when there wasn't a decoder available for a type.
+//
+// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0.
+type ErrNoDecoder struct {
+ Type reflect.Type
+}
+
+func (end ErrNoDecoder) Error() string {
+ return "no decoder found for " + end.Type.String()
+}
+
+// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
+//
+// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0.
+type ErrNoTypeMapEntry struct {
+ Type bsontype.Type
+}
+
+func (entme ErrNoTypeMapEntry) Error() string {
+ return "no type map entry found for " + entme.Type.String()
+}
+
+// ErrNotInterface is returned when the provided type is not an interface.
+//
+// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0.
+var ErrNotInterface = errors.New("The provided type is not an interface")
+
+// A RegistryBuilder is used to build a Registry. This type is not goroutine
+// safe.
+//
+// Deprecated: Use Registry instead.
+type RegistryBuilder struct {
+ registry *Registry
+}
+
+// NewRegistryBuilder creates a new empty RegistryBuilder.
+//
+// Deprecated: Use NewRegistry instead.
+func NewRegistryBuilder() *RegistryBuilder {
+ return &RegistryBuilder{
+ registry: NewRegistry(),
+ }
+}
+
+// RegisterCodec will register the provided ValueCodec for the provided type.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead.
+func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
+ rb.RegisterTypeEncoder(t, codec)
+ rb.RegisterTypeDecoder(t, codec)
+ return rb
+}
+
+// RegisterTypeEncoder will register the provided ValueEncoder for the provided type.
+//
+// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It
+// will not be called when marshaling a non-interface type that implements the interface.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder instead.
+func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+ rb.registry.RegisterTypeEncoder(t, enc)
+ return rb
+}
+
+// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when
+// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+//
+// Deprecated: Use Registry.RegisterInterfaceEncoder instead.
+func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+ rb.registry.RegisterInterfaceEncoder(t, enc)
+ return rb
+}
+
+// RegisterTypeDecoder will register the provided ValueDecoder for the provided type.
+//
+// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface.
+// It will not be called when unmarshaling into a non-interface type that implements the interface.
+//
+// Deprecated: Use Registry.RegisterTypeDecoder instead.
+func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+ rb.registry.RegisterTypeDecoder(t, dec)
+ return rb
+}
+
+// RegisterHookDecoder will register a decoder for the provided interface type t. This decoder will be called when
+// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+//
+// Deprecated: Use Registry.RegisterInterfaceDecoder instead.
+func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+ rb.registry.RegisterInterfaceDecoder(t, dec)
+ return rb
+}
+
+// RegisterEncoder registers the provided type and encoder pair.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead.
+func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+ if t == tEmpty {
+ rb.registry.RegisterTypeEncoder(t, enc)
+ return rb
+ }
+ switch t.Kind() {
+ case reflect.Interface:
+ rb.registry.RegisterInterfaceEncoder(t, enc)
+ default:
+ rb.registry.RegisterTypeEncoder(t, enc)
+ }
+ return rb
+}
+
+// RegisterDecoder registers the provided type and decoder pair.
+//
+// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead.
+func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+ if t == nil {
+ rb.registry.RegisterTypeDecoder(t, dec)
+ return rb
+ }
+ if t == tEmpty {
+ rb.registry.RegisterTypeDecoder(t, dec)
+ return rb
+ }
+ switch t.Kind() {
+ case reflect.Interface:
+ rb.registry.RegisterInterfaceDecoder(t, dec)
+ default:
+ rb.registry.RegisterTypeDecoder(t, dec)
+ }
+ return rb
+}
+
+// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
+// kind.
+//
+// Deprecated: Use Registry.RegisterKindEncoder instead.
+func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
+ rb.registry.RegisterKindEncoder(kind, enc)
+ return rb
+}
+
+// RegisterDefaultDecoder will register the provided ValueDecoder to the
+// provided kind.
+//
+// Deprecated: Use Registry.RegisterKindDecoder instead.
+func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
+ rb.registry.RegisterKindDecoder(kind, dec)
+ return rb
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+//
+// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON
+// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
+// to decode to bson.Raw, use the following code:
+//
+// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
+//
+// Deprecated: Use Registry.RegisterTypeMapEntry instead.
+func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
+ rb.registry.RegisterTypeMapEntry(bt, rt)
+ return rb
+}
+
+// Build creates a Registry from the current state of this RegistryBuilder.
+//
+// Deprecated: Use NewRegistry instead.
+func (rb *RegistryBuilder) Build() *Registry {
+ r := &Registry{
+ interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...),
+ interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...),
+ typeEncoders: rb.registry.typeEncoders.Clone(),
+ typeDecoders: rb.registry.typeDecoders.Clone(),
+ kindEncoders: rb.registry.kindEncoders.Clone(),
+ kindDecoders: rb.registry.kindDecoders.Clone(),
+ }
+ rb.registry.typeMap.Range(func(k, v interface{}) bool {
+ if k != nil && v != nil {
+ r.typeMap.Store(k, v)
+ }
+ return true
+ })
+ return r
+}
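+
+// As an illustrative migration sketch (not taken from the driver docs), code
+// using the deprecated builder:
+//
+//	reg := NewRegistryBuilder().RegisterCodec(t, codec).Build()
+//
+// registers codec for both encoding and decoding of t, and is equivalent to
+// registering on a Registry directly:
+//
+//	reg := NewRegistry()
+//	reg.RegisterTypeEncoder(t, codec)
+//	reg.RegisterTypeDecoder(t, codec)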
+
+// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
+// type passed around, and Encoders and Decoders are constructed from it.
+type Registry struct {
+ interfaceEncoders []interfaceValueEncoder
+ interfaceDecoders []interfaceValueDecoder
+ typeEncoders *typeEncoderCache
+ typeDecoders *typeDecoderCache
+ kindEncoders *kindEncoderCache
+ kindDecoders *kindDecoderCache
+ typeMap sync.Map // map[bsontype.Type]reflect.Type
+}
+
+// NewRegistry creates a new empty Registry.
+func NewRegistry() *Registry {
+ return &Registry{
+ typeEncoders: new(typeEncoderCache),
+ typeDecoders: new(typeDecoderCache),
+ kindEncoders: new(kindEncoderCache),
+ kindDecoders: new(kindDecoderCache),
+ }
+}
+
+// RegisterTypeEncoder registers the provided ValueEncoder for the provided type.
+//
+// The type will be used as provided, so an encoder can be registered for a type and a different
+// encoder can be registered for a pointer to that type.
+//
+// If the given type is an interface, the encoder will be called when marshaling a type that is
+// that interface. It will not be called when marshaling a non-interface type that implements the
+// interface. To get the latter behavior, call RegisterHookEncoder instead.
+//
+// RegisterTypeEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) {
+ r.typeEncoders.Store(valueType, enc)
+}
+
+// RegisterTypeDecoder registers the provided ValueDecoder for the provided type.
+//
+// The type will be used as provided, so a decoder can be registered for a type and a different
+// decoder can be registered for a pointer to that type.
+//
+// If the given type is an interface, the decoder will be called when unmarshaling into a type that
+// is that interface. It will not be called when unmarshaling into a non-interface type that
+// implements the interface. To get the latter behavior, call RegisterHookDecoder instead.
+//
+// RegisterTypeDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) {
+ r.typeDecoders.Store(valueType, dec)
+}
+
+// RegisterKindEncoder registers the provided ValueEncoder for the provided kind.
+//
+// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For
+// example, consider the type MyInt defined as
+//
+// type MyInt int32
+//
+// To define an encoder for MyInt and int32, use RegisterKindEncoder like
+//
+// reg.RegisterKindEncoder(reflect.Int32, myEncoder)
+//
+// RegisterKindEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) {
+ r.kindEncoders.Store(kind, enc)
+}
+
+// RegisterKindDecoder registers the provided ValueDecoder for the provided kind.
+//
+// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For
+// example, consider the type MyInt defined as
+//
+// type MyInt int32
+//
+// To define a decoder for MyInt and int32, use RegisterKindDecoder like
+//
+// reg.RegisterKindDecoder(reflect.Int32, myDecoder)
+//
+// RegisterKindDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) {
+ r.kindDecoders.Store(kind, dec)
+}
+
+// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will
+// be called when marshaling a type if the type implements iface or a pointer to the type
+// implements iface. If the provided type is not an interface
+// (i.e. iface.Kind() != reflect.Interface), this method will panic.
+//
+// RegisterInterfaceEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) {
+ if iface.Kind() != reflect.Interface {
+ panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+
+ "got type %s with kind %s", iface, iface.Kind())
+ panic(panicStr)
+ }
+
+ for idx, encoder := range r.interfaceEncoders {
+ if encoder.i == iface {
+ r.interfaceEncoders[idx].ve = enc
+ return
+ }
+ }
+
+ r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc})
+}
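+
+// For example (a sketch; stringerEncoder is a hypothetical ValueEncoder, not
+// part of this package):
+//
+//	var tStringer = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+//	reg.RegisterInterfaceEncoder(tStringer, stringerEncoder)
+//
+// stringerEncoder is then used when marshaling any value whose type, or
+// pointer type, implements fmt.Stringer and has no more specific encoder.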
+
+// RegisterInterfaceDecoder registers a decoder for the provided interface type iface. This decoder will
+// be called when unmarshaling into a type if the type implements iface or a pointer to the type
+// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface),
+// this method will panic.
+//
+// RegisterInterfaceDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) {
+ if iface.Kind() != reflect.Interface {
+ panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+
+ "got type %s with kind %s", iface, iface.Kind())
+ panic(panicStr)
+ }
+
+ for idx, decoder := range r.interfaceDecoders {
+ if decoder.i == iface {
+ r.interfaceDecoders[idx].vd = dec
+ return
+ }
+ }
+
+ r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec})
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+//
+// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON
+// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
+// to decode to bson.Raw, use the following code:
+//
+// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
+func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) {
+ r.typeMap.Store(bt, rt)
+}
+
+// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup
+// order:
+//
+// 1. An encoder registered for the exact type. If the given type is an interface, an encoder
+// registered using RegisterTypeEncoder for that interface will be selected.
+//
+// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type
+// or by a pointer to the type.
+//
+// 3. An encoder registered using RegisterKindEncoder for the kind of value.
+//
+// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for
+// concurrent use by multiple goroutines after all codecs and encoders are registered.
+func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) {
+ if valueType == nil {
+ return nil, ErrNoEncoder{Type: valueType}
+ }
+ enc, found := r.lookupTypeEncoder(valueType)
+ if found {
+ if enc == nil {
+ return nil, ErrNoEncoder{Type: valueType}
+ }
+ return enc, nil
+ }
+
+ enc, found = r.lookupInterfaceEncoder(valueType, true)
+ if found {
+ return r.typeEncoders.LoadOrStore(valueType, enc), nil
+ }
+
+ if v, ok := r.kindEncoders.Load(valueType.Kind()); ok {
+ return r.storeTypeEncoder(valueType, v), nil
+ }
+ return nil, ErrNoEncoder{Type: valueType}
+}
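+
+// To illustrate the precedence above (a sketch; typeEnc and kindEnc are
+// placeholder encoders): if typeEnc is registered for MyInt with
+// RegisterTypeEncoder and kindEnc for reflect.Int32 with RegisterKindEncoder,
+// then
+//
+//	enc, _ := reg.LookupEncoder(reflect.TypeOf(MyInt(0)))
+//
+// returns typeEnc, because the exact-type match takes precedence over the
+// kind match; interface and kind matches are cached as type encoders for
+// subsequent lookups.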
+
+func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder {
+ return r.typeEncoders.LoadOrStore(rt, enc)
+}
+
+func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) {
+ return r.typeEncoders.Load(rt)
+}
+
+func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) {
+ if valueType == nil {
+ return nil, false
+ }
+ for _, ienc := range r.interfaceEncoders {
+ if valueType.Implements(ienc.i) {
+ return ienc.ve, true
+ }
+ if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) {
+ // if *t implements an interface, this will catch if t implements an interface further
+ // ahead in interfaceEncoders
+ defaultEnc, found := r.lookupInterfaceEncoder(valueType, false)
+ if !found {
+ defaultEnc, _ = r.kindEncoders.Load(valueType.Kind())
+ }
+ return newCondAddrEncoder(ienc.ve, defaultEnc), true
+ }
+ }
+ return nil, false
+}
+
+// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup
+// order:
+//
+// 1. A decoder registered for the exact type. If the given type is an interface, a decoder
+// registered using RegisterTypeDecoder for that interface will be selected.
+//
+// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by
+// a pointer to the type.
+//
+// 3. A decoder registered using RegisterKindDecoder for the kind of value.
+//
+// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for
+// concurrent use by multiple goroutines after all codecs and decoders are registered.
+func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) {
+ if valueType == nil {
+ return nil, ErrNilType
+ }
+ dec, found := r.lookupTypeDecoder(valueType)
+ if found {
+ if dec == nil {
+ return nil, ErrNoDecoder{Type: valueType}
+ }
+ return dec, nil
+ }
+
+ dec, found = r.lookupInterfaceDecoder(valueType, true)
+ if found {
+ return r.storeTypeDecoder(valueType, dec), nil
+ }
+
+ if v, ok := r.kindDecoders.Load(valueType.Kind()); ok {
+ return r.storeTypeDecoder(valueType, v), nil
+ }
+ return nil, ErrNoDecoder{Type: valueType}
+}
+
+func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) {
+ return r.typeDecoders.Load(valueType)
+}
+
+func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder {
+ return r.typeDecoders.LoadOrStore(typ, dec)
+}
+
+func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) {
+ for _, idec := range r.interfaceDecoders {
+ if valueType.Implements(idec.i) {
+ return idec.vd, true
+ }
+ if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) {
+ // if *t implements an interface, this will catch if t implements an interface further
+ // ahead in interfaceDecoders
+ defaultDec, found := r.lookupInterfaceDecoder(valueType, false)
+ if !found {
+ defaultDec, _ = r.kindDecoders.Load(valueType.Kind())
+ }
+ return newCondAddrDecoder(idec.vd, defaultDec), true
+ }
+ }
+ return nil, false
+}
+
+// LookupTypeMapEntry looks up the Go type registered for the corresponding BSON
+// type in the registry's type map. If no type is found, ErrNoTypeMapEntry is returned.
+//
+// LookupTypeMapEntry should not be called concurrently with any other Registry method.
+func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) {
+ v, ok := r.typeMap.Load(bt)
+ if v == nil || !ok {
+ return nil, ErrNoTypeMapEntry{Type: bt}
+ }
+ return v.(reflect.Type), nil
+}
+
+type interfaceValueEncoder struct {
+ i reflect.Type
+ ve ValueEncoder
+}
+
+type interfaceValueDecoder struct {
+ i reflect.Type
+ vd ValueDecoder
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go
new file mode 100644
index 00000000000..14c9fd25646
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go
@@ -0,0 +1,214 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "go.mongodb.org/mongo-driver/bson/bsonoptions"
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+var defaultSliceCodec = NewSliceCodec()
+
+// SliceCodec is the Codec used for slice values.
+//
+// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To
+// configure the slice encode and decode behavior, use the configuration methods
+// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and
+// decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to marshal nil Go slices as empty
+// BSON arrays, use:
+//
+// opt := options.Client().SetBSONOptions(&options.BSONOptions{
+// NilSliceAsEmpty: true,
+// })
+//
+// See the deprecation notice for each field in SliceCodec for the corresponding
+// settings.
+type SliceCodec struct {
+ // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of
+ // BSON null.
+ //
+ // Deprecated: Use bson.Encoder.NilSliceAsEmpty instead.
+ EncodeNilAsEmpty bool
+}
+
+// NewSliceCodec returns a SliceCodec with options opts.
+//
+// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See
+// [SliceCodec] for more details.
+func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
+ sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
+
+ codec := SliceCodec{}
+ if sliceOpt.EncodeNilAsEmpty != nil {
+ codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty
+ }
+ return &codec
+}
+
+// EncodeValue is the ValueEncoder for slice types.
+func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Kind() != reflect.Slice {
+ return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+ }
+
+ if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty {
+ return vw.WriteNull()
+ }
+
+ // If we have a []byte we want to treat it as a binary instead of as an array.
+ if val.Type().Elem() == tByte {
+ byteSlice := make([]byte, val.Len())
+ reflect.Copy(reflect.ValueOf(byteSlice), val)
+ return vw.WriteBinary(byteSlice)
+ }
+
+ // If we have a []primitive.E we want to treat it as a document instead of as an array.
+ if val.Type() == tD || val.Type().ConvertibleTo(tD) {
+ d := val.Convert(tD).Interface().(primitive.D)
+
+ dw, err := vw.WriteDocument()
+ if err != nil {
+ return err
+ }
+
+ for _, e := range d {
+ err = encodeElement(ec, dw, e)
+ if err != nil {
+ return err
+ }
+ }
+
+ return dw.WriteDocumentEnd()
+ }
+
+ aw, err := vw.WriteArray()
+ if err != nil {
+ return err
+ }
+
+ elemType := val.Type().Elem()
+ encoder, err := ec.LookupEncoder(elemType)
+ if err != nil && elemType.Kind() != reflect.Interface {
+ return err
+ }
+
+ for idx := 0; idx < val.Len(); idx++ {
+ currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx))
+ if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
+ return lookupErr
+ }
+
+ vw, err := aw.WriteArrayElement()
+ if err != nil {
+ return err
+ }
+
+ if errors.Is(lookupErr, errInvalidValue) {
+ err = vw.WriteNull()
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ err = currEncoder.EncodeValue(ec, vw, currVal)
+ if err != nil {
+ return err
+ }
+ }
+ return aw.WriteArrayEnd()
+}
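+
+// To illustrate the special cases above (a behavior sketch, assuming the
+// standard bson.Marshal entry point routes slices through this codec):
+//
+//	bson.Marshal(bson.D{{"data", []byte{0x01, 0x02}}}) // "data" becomes BSON binary, not an array
+//	bson.Marshal(bson.D{{"doc", bson.D{{"x", 1}}}})    // "doc" becomes an embedded document
+//	bson.Marshal(bson.D{{"arr", []int{1, 2, 3}}})      // "arr" becomes a BSON array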
+
+// DecodeValue is the ValueDecoder for slice types.
+func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Kind() != reflect.Slice {
+ return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+ }
+
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Array:
+ case bsontype.Null:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadNull()
+ case bsontype.Undefined:
+ val.Set(reflect.Zero(val.Type()))
+ return vr.ReadUndefined()
+ case bsontype.Type(0), bsontype.EmbeddedDocument:
+ if val.Type().Elem() != tE {
+ return fmt.Errorf("cannot decode document into %s", val.Type())
+ }
+ case bsontype.Binary:
+ if val.Type().Elem() != tByte {
+ return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType)
+ }
+ data, subtype, err := vr.ReadBinary()
+ if err != nil {
+ return err
+ }
+ if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+ return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype)
+ }
+
+ if val.IsNil() {
+ val.Set(reflect.MakeSlice(val.Type(), 0, len(data)))
+ }
+ val.SetLen(0)
+ val.Set(reflect.AppendSlice(val, reflect.ValueOf(data)))
+ return nil
+ case bsontype.String:
+ if sliceType := val.Type().Elem(); sliceType != tByte {
+ return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType)
+ }
+ str, err := vr.ReadString()
+ if err != nil {
+ return err
+ }
+ byteStr := []byte(str)
+
+ if val.IsNil() {
+ val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr)))
+ }
+ val.SetLen(0)
+ val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr)))
+ return nil
+ default:
+ return fmt.Errorf("cannot decode %v into a slice", vrType)
+ }
+
+ var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+ switch val.Type().Elem() {
+ case tE:
+ dc.Ancestor = val.Type()
+ elemsFunc = defaultValueDecoders.decodeD
+ default:
+ elemsFunc = defaultValueDecoders.decodeDefault
+ }
+
+ elems, err := elemsFunc(dc, vr, val)
+ if err != nil {
+ return err
+ }
+
+ if val.IsNil() {
+ val.Set(reflect.MakeSlice(val.Type(), 0, len(elems)))
+ }
+
+ val.SetLen(0)
+ val.Set(reflect.Append(val, elems...))
+
+ return nil
+}
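+
+// A note on the decode semantics above (observable behavior, not a documented
+// contract): decoding BSON null or undefined sets the destination slice to
+// nil, while decoding an array truncates with SetLen(0) and appends, so an
+// existing backing array may be reused:
+//
+//	s := make([]int32, 0, 8)
+//	// after decoding a 3-element BSON array into &s, len(s) == 3, and the
+//	// original backing array may still be in use.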
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go
new file mode 100644
index 00000000000..a8f885a854f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go
@@ -0,0 +1,140 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.mongodb.org/mongo-driver/bson/bsonoptions"
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// StringCodec is the Codec used for string values.
+//
+// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To
+// override the default string encode and decode behavior, create a new registry
+// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new
+// encoder and decoder for strings.
+//
+// For example,
+//
+// reg := bson.NewRegistry()
+// reg.RegisterKindEncoder(reflect.String, myStringEncoder)
+// reg.RegisterKindDecoder(reflect.String, myStringDecoder)
+type StringCodec struct {
+ // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation.
+ // If false, a string made from the raw object ID bytes will be used. Defaults to true.
+ //
+ // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
+ DecodeObjectIDAsHex bool
+}
+
+var (
+ defaultStringCodec = NewStringCodec()
+
+ // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be
+ // used by collection type decoders (e.g. map, slice, etc) to set individual values in a
+ // collection.
+ _ typeDecoder = defaultStringCodec
+)
+
+// NewStringCodec returns a StringCodec with options opts.
+//
+// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See
+// [StringCodec] for more details.
+func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec {
+ stringOpt := bsonoptions.MergeStringCodecOptions(opts...)
+ return &StringCodec{*stringOpt.DecodeObjectIDAsHex}
+}
+
+// EncodeValue is the ValueEncoder for string types.
+func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if val.Kind() != reflect.String {
+ return ValueEncoderError{
+ Name: "StringEncodeValue",
+ Kinds: []reflect.Kind{reflect.String},
+ Received: val,
+ }
+ }
+
+ return vw.WriteString(val.String())
+}
+
+func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t.Kind() != reflect.String {
+ return emptyValue, ValueDecoderError{
+ Name: "StringDecodeValue",
+ Kinds: []reflect.Kind{reflect.String},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var str string
+ var err error
+ switch vr.Type() {
+ case bsontype.String:
+ str, err = vr.ReadString()
+ if err != nil {
+ return emptyValue, err
+ }
+ case bsontype.ObjectID:
+ oid, err := vr.ReadObjectID()
+ if err != nil {
+ return emptyValue, err
+ }
+ if sc.DecodeObjectIDAsHex {
+ str = oid.Hex()
+ } else {
+ // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string.
+ byteArray := [12]byte(oid)
+ str = string(byteArray[:])
+ }
+ case bsontype.Symbol:
+ str, err = vr.ReadSymbol()
+ if err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Binary:
+ data, subtype, err := vr.ReadBinary()
+ if err != nil {
+ return emptyValue, err
+ }
+ if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+ return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"}
+ }
+ str = string(data)
+ case bsontype.Null:
+ if err = vr.ReadNull(); err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Undefined:
+ if err = vr.ReadUndefined(); err != nil {
+ return emptyValue, err
+ }
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type())
+ }
+
+ return reflect.ValueOf(str), nil
+}
+
+// DecodeValue is the ValueDecoder for string types.
+func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Kind() != reflect.String {
+ return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
+ }
+
+ elem, err := sc.decodeType(dctx, vr, val.Type())
+ if err != nil {
+ return err
+ }
+
+ val.SetString(elem.String())
+ return nil
+}
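+
+// An illustrative sketch of the ObjectID handling above (Doc is a
+// hypothetical type):
+//
+//	type Doc struct {
+//		ID string `bson:"_id"`
+//	}
+//
+// With DecodeObjectIDAsHex set (the default from NewStringCodec), a BSON
+// ObjectID in _id decodes into ID as its 24-character hex representation
+// rather than as the 12 raw bytes.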
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
new file mode 100644
index 00000000000..f8d9690c139
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
@@ -0,0 +1,736 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson/bsonoptions"
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type.
+type DecodeError struct {
+ keys []string
+ wrapped error
+}
+
+// Unwrap returns the underlying error
+func (de *DecodeError) Unwrap() error {
+ return de.wrapped
+}
+
+// Error implements the error interface.
+func (de *DecodeError) Error() string {
+ // The keys are stored in reverse order because the de.keys slice is built up while propagating the error up the
+ // stack of BSON keys, so we call de.Keys(), which reverses them.
+ keyPath := strings.Join(de.Keys(), ".")
+ return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped)
+}
+
+// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down
+// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be
+// a string, the keys slice will be ["a", "b", "c"].
+func (de *DecodeError) Keys() []string {
+ reversedKeys := make([]string, 0, len(de.keys))
+ for idx := len(de.keys) - 1; idx >= 0; idx-- {
+ reversedKeys = append(reversedKeys, de.keys[idx])
+ }
+
+ return reversedKeys
+}
+
+// Zeroer allows custom struct types to report whether they are the zero
+// value. Struct types that don't implement Zeroer, or whose IsZero method
+// returns false, are considered to be non-zero.
+type Zeroer interface {
+ IsZero() bool
+}
+
+// StructCodec is the Codec used for struct values.
+//
+// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0.
+// To configure the struct encode and decode behavior, use the configuration
+// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode
+// and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to omit zero-value structs when
+// using the "omitempty" struct tag, use:
+//
+// opt := options.Client().SetBSONOptions(&options.BSONOptions{
+// OmitZeroStruct: true,
+// })
+//
+// See the deprecation notice for each field in StructCodec for the corresponding
+// settings.
+type StructCodec struct {
+ cache sync.Map // map[reflect.Type]*structDescription
+ parser StructTagParser
+
+ // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the
+ // destination value passed to Decode before unmarshaling BSON documents into them.
+ //
+ // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead.
+ DecodeZeroStruct bool
+
+ // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the
+ // destination value passed to Decode before unmarshaling BSON documents into them.
+ //
+ // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
+ DecodeDeepZeroInline bool
+
+ // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g.
+ // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag
+ // option is set.
+ //
+ // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead.
+ EncodeOmitDefaultStruct bool
+
+ // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields.
+ //
+ // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
+ // supported in Go Driver 2.0.
+ AllowUnexportedFields bool
+
+ // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is
+ // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The
+ // default value is true.
+ //
+ // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or
+ // options.BSONOptions.ErrorOnInlineDuplicates instead.
+ OverwriteDuplicatedInlinedFields bool
+}
+
+var _ ValueEncoder = &StructCodec{}
+var _ ValueDecoder = &StructCodec{}
+
+// NewStructCodec returns a StructCodec that uses p for struct tag parsing.
+//
+// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See
+// [StructCodec] for more details.
+func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) {
+ if p == nil {
+ return nil, errors.New("a StructTagParser must be provided to NewStructCodec")
+ }
+
+ structOpt := bsonoptions.MergeStructCodecOptions(opts...)
+
+ codec := &StructCodec{
+ parser: p,
+ }
+
+ if structOpt.DecodeZeroStruct != nil {
+ codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct
+ }
+ if structOpt.DecodeDeepZeroInline != nil {
+ codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline
+ }
+ if structOpt.EncodeOmitDefaultStruct != nil {
+ codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct
+ }
+ if structOpt.OverwriteDuplicatedInlinedFields != nil {
+ codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields
+ }
+ if structOpt.AllowUnexportedFields != nil {
+ codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields
+ }
+
+ return codec, nil
+}
+
+// EncodeValue handles encoding generic struct types.
+func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Kind() != reflect.Struct {
+ return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
+ }
+
+ sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates)
+ if err != nil {
+ return err
+ }
+
+ dw, err := vw.WriteDocument()
+ if err != nil {
+ return err
+ }
+ var rv reflect.Value
+ for _, desc := range sd.fl {
+ if desc.inline == nil {
+ rv = val.Field(desc.idx)
+ } else {
+ rv, err = fieldByIndexErr(val, desc.inline)
+ if err != nil {
+ continue
+ }
+ }
+
+ desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv)
+
+ if err != nil && !errors.Is(err, errInvalidValue) {
+ return err
+ }
+
+ if errors.Is(err, errInvalidValue) {
+ if desc.omitEmpty {
+ continue
+ }
+ vw2, err := dw.WriteDocumentElement(desc.name)
+ if err != nil {
+ return err
+ }
+ err = vw2.WriteNull()
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if desc.encoder == nil {
+ return ErrNoEncoder{Type: rv.Type()}
+ }
+
+ encoder := desc.encoder
+
+ var empty bool
+ if cz, ok := encoder.(CodecZeroer); ok {
+ empty = cz.IsTypeZero(rv.Interface())
+ } else if rv.Kind() == reflect.Interface {
+ // isEmpty will not treat an interface rv as an interface, so we need to check for the
+ // nil interface separately.
+ empty = rv.IsNil()
+ } else {
+ empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct)
+ }
+ if desc.omitEmpty && empty {
+ continue
+ }
+
+ vw2, err := dw.WriteDocumentElement(desc.name)
+ if err != nil {
+ return err
+ }
+
+ ectx := EncodeContext{
+ Registry: ec.Registry,
+ MinSize: desc.minSize || ec.MinSize,
+ errorOnInlineDuplicates: ec.errorOnInlineDuplicates,
+ stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt,
+ nilMapAsEmpty: ec.nilMapAsEmpty,
+ nilSliceAsEmpty: ec.nilSliceAsEmpty,
+ nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty,
+ omitZeroStruct: ec.omitZeroStruct,
+ useJSONStructTags: ec.useJSONStructTags,
+ }
+ err = encoder.EncodeValue(ectx, vw2, rv)
+ if err != nil {
+ return err
+ }
+ }
+
+ if sd.inlineMap >= 0 {
+ rv := val.Field(sd.inlineMap)
+ collisionFn := func(key string) bool {
+ _, exists := sd.fm[key]
+ return exists
+ }
+
+ return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn)
+ }
+
+ return dw.WriteDocumentEnd()
+}
+
+func newDecodeError(key string, original error) error {
+ var de *DecodeError
+ if !errors.As(original, &de) {
+ return &DecodeError{
+ keys: []string{key},
+ wrapped: original,
+ }
+ }
+
+ de.keys = append(de.keys, key)
+ return de
+}
+
+// DecodeValue implements the Codec interface.
+// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr.
+// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared.
+func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Kind() != reflect.Struct {
+ return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
+ }
+
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Type(0), bsontype.EmbeddedDocument:
+ case bsontype.Null:
+ if err := vr.ReadNull(); err != nil {
+ return err
+ }
+
+ val.Set(reflect.Zero(val.Type()))
+ return nil
+ case bsontype.Undefined:
+ if err := vr.ReadUndefined(); err != nil {
+ return err
+ }
+
+ val.Set(reflect.Zero(val.Type()))
+ return nil
+ default:
+ return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
+ }
+
+ sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false)
+ if err != nil {
+ return err
+ }
+
+ if sc.DecodeZeroStruct || dc.zeroStructs {
+ val.Set(reflect.Zero(val.Type()))
+ }
+ if sc.DecodeDeepZeroInline && sd.inline {
+ val.Set(deepZero(val.Type()))
+ }
+
+ var decoder ValueDecoder
+ var inlineMap reflect.Value
+ if sd.inlineMap >= 0 {
+ inlineMap = val.Field(sd.inlineMap)
+ decoder, err = dc.LookupDecoder(inlineMap.Type().Elem())
+ if err != nil {
+ return err
+ }
+ }
+
+ dr, err := vr.ReadDocument()
+ if err != nil {
+ return err
+ }
+
+ for {
+ name, vr, err := dr.ReadElement()
+ if errors.Is(err, bsonrw.ErrEOD) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ fd, exists := sd.fm[name]
+ if !exists {
+ // If the original name isn't found in the struct description, try again with the
+ // lowercased name. This can match when no BSON tag is specified, because by
+ // default describeStruct lowercases all field names.
+ fd, exists = sd.fm[strings.ToLower(name)]
+ }
+
+ if !exists {
+ if sd.inlineMap < 0 {
+ // The encoding/json package requires a flag to return on error for non-existent fields.
+ // This functionality seems appropriate for the struct codec.
+ err = vr.Skip()
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+
+ elem := reflect.New(inlineMap.Type().Elem()).Elem()
+ dc.Ancestor = inlineMap.Type()
+ err = decoder.DecodeValue(dc, vr, elem)
+ if err != nil {
+ return err
+ }
+ inlineMap.SetMapIndex(reflect.ValueOf(name), elem)
+ continue
+ }
+
+ var field reflect.Value
+ if fd.inline == nil {
+ field = val.Field(fd.idx)
+ } else {
+ field, err = getInlineField(val, fd.inline)
+ if err != nil {
+ return err
+ }
+ }
+
+ if !field.CanSet() { // Being settable is a super set of being addressable.
+ innerErr := fmt.Errorf("field %v is not settable", field)
+ return newDecodeError(fd.name, innerErr)
+ }
+ if field.Kind() == reflect.Ptr && field.IsNil() {
+ field.Set(reflect.New(field.Type().Elem()))
+ }
+ field = field.Addr()
+
+ dctx := DecodeContext{
+ Registry: dc.Registry,
+ Truncate: fd.truncate || dc.Truncate,
+ defaultDocumentType: dc.defaultDocumentType,
+ binaryAsSlice: dc.binaryAsSlice,
+ useJSONStructTags: dc.useJSONStructTags,
+ useLocalTimeZone: dc.useLocalTimeZone,
+ zeroMaps: dc.zeroMaps,
+ zeroStructs: dc.zeroStructs,
+ }
+
+ if fd.decoder == nil {
+ return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()})
+ }
+
+ err = fd.decoder.DecodeValue(dctx, vr, field.Elem())
+ if err != nil {
+ return newDecodeError(fd.name, err)
+ }
+ }
+
+ return nil
+}
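+
+// An illustrative sketch of the inline-map behavior above (Doc is a
+// hypothetical type):
+//
+//	type Doc struct {
+//		Name  string                 `bson:"name"`
+//		Extra map[string]interface{} `bson:",inline"`
+//	}
+//
+// BSON keys that don't match any struct field are collected into Extra
+// instead of being skipped.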
+
+func isEmpty(v reflect.Value, omitZeroStruct bool) bool {
+ kind := v.Kind()
+ if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) {
+ return v.Interface().(Zeroer).IsZero()
+ }
+ switch kind {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Struct:
+ if !omitZeroStruct {
+ return false
+ }
+ vt := v.Type()
+ if vt == tTime {
+ return v.Interface().(time.Time).IsZero()
+ }
+ numField := vt.NumField()
+ for i := 0; i < numField; i++ {
+ ff := vt.Field(i)
+ if ff.PkgPath != "" && !ff.Anonymous {
+ continue // Private field
+ }
+ if !isEmpty(v.Field(i), omitZeroStruct) {
+ return false
+ }
+ }
+ return true
+ }
+ return !v.IsValid() || v.IsZero()
+}
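+
+// Because isEmpty consults Zeroer first, a custom type can control how
+// "omitempty" treats it. An illustrative (hypothetical) implementation:
+//
+//	type OptionalInt struct {
+//		Set   bool
+//		Value int
+//	}
+//
+//	func (o OptionalInt) IsZero() bool { return !o.Set }
+//
+// A field of type OptionalInt tagged `bson:",omitempty"` is then omitted
+// whenever Set is false, regardless of Value.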
+
+type structDescription struct {
+ fm map[string]fieldDescription
+ fl []fieldDescription
+ inlineMap int
+ inline bool
+}
+
+type fieldDescription struct {
+ name string // BSON key name
+ fieldName string // struct field name
+ idx int
+ omitEmpty bool
+ minSize bool
+ truncate bool
+ inline []int
+ encoder ValueEncoder
+ decoder ValueDecoder
+}
+
+type byIndex []fieldDescription
+
+func (bi byIndex) Len() int { return len(bi) }
+
+func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] }
+
+func (bi byIndex) Less(i, j int) bool {
+ // If a field is inlined, its index in the top level struct is stored at inline[0]
+ iIdx, jIdx := bi[i].idx, bi[j].idx
+ if len(bi[i].inline) > 0 {
+ iIdx = bi[i].inline[0]
+ }
+ if len(bi[j].inline) > 0 {
+ jIdx = bi[j].inline[0]
+ }
+ if iIdx != jIdx {
+ return iIdx < jIdx
+ }
+ for k, biik := range bi[i].inline {
+ if k >= len(bi[j].inline) {
+ return false
+ }
+ if biik != bi[j].inline[k] {
+ return biik < bi[j].inline[k]
+ }
+ }
+ return len(bi[i].inline) < len(bi[j].inline)
+}
+
+func (sc *StructCodec) describeStruct(
+ r *Registry,
+ t reflect.Type,
+ useJSONStructTags bool,
+ errorOnDuplicates bool,
+) (*structDescription, error) {
+ // We need to analyze the struct, including getting the tags, collecting
+ // information about inlining, and create a map of the field name to the field.
+ if v, ok := sc.cache.Load(t); ok {
+ return v.(*structDescription), nil
+ }
+ // TODO(charlie): Only describe the struct once when called
+ // concurrently with the same type.
+ ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates)
+ if err != nil {
+ return nil, err
+ }
+ if v, loaded := sc.cache.LoadOrStore(t, ds); loaded {
+ ds = v.(*structDescription)
+ }
+ return ds, nil
+}
+
+func (sc *StructCodec) describeStructSlow(
+ r *Registry,
+ t reflect.Type,
+ useJSONStructTags bool,
+ errorOnDuplicates bool,
+) (*structDescription, error) {
+ numFields := t.NumField()
+ sd := &structDescription{
+ fm: make(map[string]fieldDescription, numFields),
+ fl: make([]fieldDescription, 0, numFields),
+ inlineMap: -1,
+ }
+
+ var fields []fieldDescription
+ for i := 0; i < numFields; i++ {
+ sf := t.Field(i)
+ if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) {
+ // The field is unexported, and either unexported fields aren't allowed or the field isn't anonymous; skip it.
+ continue
+ }
+
+ sfType := sf.Type
+ encoder, err := r.LookupEncoder(sfType)
+ if err != nil {
+ encoder = nil
+ }
+ decoder, err := r.LookupDecoder(sfType)
+ if err != nil {
+ decoder = nil
+ }
+
+ description := fieldDescription{
+ fieldName: sf.Name,
+ idx: i,
+ encoder: encoder,
+ decoder: decoder,
+ }
+
+ var stags StructTags
+ // If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser
+ // instead of the parser defined on the codec.
+ if useJSONStructTags {
+ stags, err = JSONFallbackStructTagParser.ParseStructTags(sf)
+ } else {
+ stags, err = sc.parser.ParseStructTags(sf)
+ }
+ if err != nil {
+ return nil, err
+ }
+ if stags.Skip {
+ continue
+ }
+ description.name = stags.Name
+ description.omitEmpty = stags.OmitEmpty
+ description.minSize = stags.MinSize
+ description.truncate = stags.Truncate
+
+ if stags.Inline {
+ sd.inline = true
+ switch sfType.Kind() {
+ case reflect.Map:
+ if sd.inlineMap >= 0 {
+ return nil, errors.New("(struct " + t.String() + ") multiple inline maps")
+ }
+ if sfType.Key() != tString {
+ return nil, errors.New("(struct " + t.String() + ") inline map must have string keys")
+ }
+ sd.inlineMap = description.idx
+ case reflect.Ptr:
+ sfType = sfType.Elem()
+ if sfType.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+ }
+ fallthrough
+ case reflect.Struct:
+ inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates)
+ if err != nil {
+ return nil, err
+ }
+ for _, fd := range inlinesf.fl {
+ if fd.inline == nil {
+ fd.inline = []int{i, fd.idx}
+ } else {
+ fd.inline = append([]int{i}, fd.inline...)
+ }
+ fields = append(fields, fd)
+
+ }
+ default:
+ return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+ }
+ continue
+ }
+ fields = append(fields, description)
+ }
+
+ // Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name
+ sort.Slice(fields, func(i, j int) bool {
+ x := fields
+ // sort field by name, breaking ties with depth, then
+ // breaking ties with index sequence.
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].inline) != len(x[j].inline) {
+ return len(x[i].inline) < len(x[j].inline)
+ }
+ return byIndex(x).Less(i, j)
+ })
+
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ sd.fl = append(sd.fl, fi)
+ sd.fm[name] = fi
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates {
+ return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name)
+ }
+ sd.fl = append(sd.fl, dominant)
+ sd.fm[name] = dominant
+ }
+
+ sort.Sort(byIndex(sd.fl))
+
+ return sd, nil
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's inlining rules. If there are multiple top-level
+// fields, the boolean will be false: This condition is an error in Go
+// and we skip all the fields.
+func dominantField(fields []fieldDescription) (fieldDescription, bool) {
+ // The fields are sorted in increasing index-length order, then by presence of tag.
+ // That means that the first field is the dominant one. We need only check
+ // for error cases: two fields at top level.
+ if len(fields) > 1 &&
+ len(fields[0].inline) == len(fields[1].inline) {
+ return fieldDescription{}, false
+ }
+ return fields[0], true
+}
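+
+// For example (a sketch of the dominance rules): given
+//
+//	type Inner struct{ Name string }
+//	type Outer struct {
+//		Inner `bson:",inline"`
+//		Name  string
+//	}
+//
+// Outer.Name (depth 0) dominates Inner.Name (depth 1), so the "name" key maps
+// to the outer field. Two conflicting fields at the same depth would instead
+// make dominantField report failure.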
+
+func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) {
+ defer func() {
+ if recovered := recover(); recovered != nil {
+ switch r := recovered.(type) {
+ case string:
+ err = fmt.Errorf("%s", r)
+ case error:
+ err = r
+ }
+ }
+ }()
+
+ result = v.FieldByIndex(index)
+ return
+}
+
+func getInlineField(val reflect.Value, index []int) (reflect.Value, error) {
+ field, err := fieldByIndexErr(val, index)
+ if err == nil {
+ return field, nil
+ }
+
+ // if parent of this element doesn't exist, fix its parent
+ inlineParent := index[:len(index)-1]
+ var fParent reflect.Value
+ if fParent, err = fieldByIndexErr(val, inlineParent); err != nil {
+ fParent, err = getInlineField(val, inlineParent)
+ if err != nil {
+ return fParent, err
+ }
+ }
+ fParent.Set(reflect.New(fParent.Type().Elem()))
+
+ return fieldByIndexErr(val, index)
+}
+
+// deepZero returns a recursively constructed zero value for the given struct type.
+func deepZero(st reflect.Type) (result reflect.Value) {
+ if st.Kind() == reflect.Struct {
+ numField := st.NumField()
+ for i := 0; i < numField; i++ {
+ if result == emptyValue {
+ result = reflect.Indirect(reflect.New(st))
+ }
+ f := result.Field(i)
+ if f.CanInterface() {
+ if f.Type().Kind() == reflect.Struct {
+ result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem())))
+ }
+ }
+ }
+ }
+ return result
+}
+
+// recursivePointerTo returns a pointer to v, recursively allocating pointers for its nested struct pointer fields as well.
+func recursivePointerTo(v reflect.Value) reflect.Value {
+ v = reflect.Indirect(v)
+ result := reflect.New(v.Type())
+ if v.Kind() == reflect.Struct {
+ for i := 0; i < v.NumField(); i++ {
+ if f := v.Field(i); f.Kind() == reflect.Ptr {
+ if f.Elem().Kind() == reflect.Struct {
+ result.Elem().Field(i).Set(recursivePointerTo(f))
+ }
+ }
+ }
+ }
+
+ return result
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
new file mode 100644
index 00000000000..18d85bfb031
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
@@ -0,0 +1,148 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "reflect"
+ "strings"
+)
+
+// StructTagParser returns the struct tags for a given struct field.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTagParser interface {
+ ParseStructTags(reflect.StructField) (StructTags, error)
+}
+
+// StructTagParserFunc is an adapter that allows a generic function to be used
+// as a StructTagParser.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTagParserFunc func(reflect.StructField) (StructTags, error)
+
+// ParseStructTags implements the StructTagParser interface.
+func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) {
+ return stpf(sf)
+}
+
+// StructTags represents the struct tag fields that the StructCodec uses during
+// the encoding and decoding process.
+//
+// In the case of a struct, the lowercased field name is used as the key for each exported
+// field but this behavior may be changed using a struct tag. The tag may also contain flags to
+// adjust the marshalling behavior for the field.
+//
+// The properties are defined below:
+//
+// OmitEmpty Only include the field if it's not set to the zero value for the type or to
+// empty slices or maps.
+//
+// MinSize Marshal an integer of a type larger than 32 bits as an int32, if that's
+// feasible while preserving the numeric value.
+//
+// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within
+// a float32.
+//
+// Inline Inline the field, which must be a struct or a map, causing all of its fields
+// or keys to be processed as if they were part of the outer struct. For maps,
+// keys must not conflict with the bson keys of other struct fields.
+//
+// Skip This struct field should be skipped. This is usually denoted by parsing a "-"
+// for the name.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTags struct {
+ Name string
+ OmitEmpty bool
+ MinSize bool
+ Truncate bool
+ Inline bool
+ Skip bool
+}
+
+// DefaultStructTagParser is the StructTagParser used by the StructCodec by default.
+// It will handle the bson struct tag. See the documentation for StructTags to see
+// what each of the returned fields means.
+//
+// If there is no name in the struct tag fields, the struct field name is lowercased.
+// The tag formats accepted are:
+//
+// "[][,[,]]"
+//
+// `(...) bson:"[][,[,]]" (...)`
+//
+// An example:
+//
+// type T struct {
+// A bool
+// B int "myb"
+// C string "myc,omitempty"
+// D string `bson:",omitempty" json:"jsonkey"`
+// E int64 ",minsize"
+// F int64 "myf,omitempty,minsize"
+// }
+//
+// A struct tag either consisting entirely of '-' or with a bson key with a
+// value consisting entirely of '-' will return a StructTags with Skip true and
+// the remaining fields will be their default values.
+//
+// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0.
+var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
+ key := strings.ToLower(sf.Name)
+ tag, ok := sf.Tag.Lookup("bson")
+ if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
+ tag = string(sf.Tag)
+ }
+ return parseTags(key, tag)
+}
+
+func parseTags(key string, tag string) (StructTags, error) {
+ var st StructTags
+ if tag == "-" {
+ st.Skip = true
+ return st, nil
+ }
+
+ for idx, str := range strings.Split(tag, ",") {
+ if idx == 0 && str != "" {
+ key = str
+ }
+ switch str {
+ case "omitempty":
+ st.OmitEmpty = true
+ case "minsize":
+ st.MinSize = true
+ case "truncate":
+ st.Truncate = true
+ case "inline":
+ st.Inline = true
+ }
+ }
+
+ st.Name = key
+
+ return st, nil
+}
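+
+// For example (a behavior sketch of parseTags):
+//
+//	parseTags("myField", "name,omitempty,minsize")
+//	// => StructTags{Name: "name", OmitEmpty: true, MinSize: true}
+//	parseTags("myField", ",inline")
+//	// => StructTags{Name: "myField", Inline: true}
+//	parseTags("myField", "-")
+//	// => StructTags{Skip: true}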
+
+// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser
+// but will also fall back to parsing the json tag on a field where the bson
+// tag isn't available.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and
+// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
+var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
+ key := strings.ToLower(sf.Name)
+ tag, ok := sf.Tag.Lookup("bson")
+ if !ok {
+ tag, ok = sf.Tag.Lookup("json")
+ }
+ if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
+ tag = string(sf.Tag)
+ }
+
+ return parseTags(key, tag)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
new file mode 100644
index 00000000000..22fb762c415
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
@@ -0,0 +1,151 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson/bsonoptions"
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+const (
+ timeFormatString = "2006-01-02T15:04:05.999Z07:00"
+)
+
+// TimeCodec is the Codec used for time.Time values.
+//
+// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0.
+// To configure the time.Time encode and decode behavior, use the configuration
+// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode
+// and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to decode BSON date-time values into the local time zone, use:
+//
+// opt := options.Client().SetBSONOptions(&options.BSONOptions{
+// UseLocalTimeZone: true,
+// })
+//
+// See the deprecation notice for each field in TimeCodec for the corresponding
+// settings.
+type TimeCodec struct {
+ // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
+ //
+ // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone
+ // instead.
+ UseLocalTimeZone bool
+}
+
+var (
+ defaultTimeCodec = NewTimeCodec()
+
+ // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used
+ // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
+ _ typeDecoder = defaultTimeCodec
+)
+
+// NewTimeCodec returns a TimeCodec with options opts.
+//
+// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See
+// [TimeCodec] for more details.
+func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
+ timeOpt := bsonoptions.MergeTimeCodecOptions(opts...)
+
+ codec := TimeCodec{}
+ if timeOpt.UseLocalTimeZone != nil {
+ codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone
+ }
+ return &codec
+}
+
+func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ if t != tTime {
+ return emptyValue, ValueDecoderError{
+ Name: "TimeDecodeValue",
+ Types: []reflect.Type{tTime},
+ Received: reflect.Zero(t),
+ }
+ }
+
+ var timeVal time.Time
+ switch vrType := vr.Type(); vrType {
+ case bsontype.DateTime:
+ dt, err := vr.ReadDateTime()
+ if err != nil {
+ return emptyValue, err
+ }
+ timeVal = time.Unix(dt/1000, dt%1000*1000000)
+ case bsontype.String:
+ // assume strings use the timeFormatString layout (RFC 3339 with millisecond precision)
+ timeStr, err := vr.ReadString()
+ if err != nil {
+ return emptyValue, err
+ }
+ timeVal, err = time.Parse(timeFormatString, timeStr)
+ if err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Int64:
+ i64, err := vr.ReadInt64()
+ if err != nil {
+ return emptyValue, err
+ }
+ timeVal = time.Unix(i64/1000, i64%1000*1000000)
+ case bsontype.Timestamp:
+ t, _, err := vr.ReadTimestamp()
+ if err != nil {
+ return emptyValue, err
+ }
+ timeVal = time.Unix(int64(t), 0)
+ case bsontype.Null:
+ if err := vr.ReadNull(); err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Undefined:
+ if err := vr.ReadUndefined(); err != nil {
+ return emptyValue, err
+ }
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType)
+ }
+
+ if !tc.UseLocalTimeZone && !dc.useLocalTimeZone {
+ timeVal = timeVal.UTC()
+ }
+ return reflect.ValueOf(timeVal), nil
+}
+
+// DecodeValue is the ValueDecoderFunc for time.Time.
+func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() || val.Type() != tTime {
+ return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
+ }
+
+ elem, err := tc.decodeType(dc, vr, tTime)
+ if err != nil {
+ return err
+ }
+
+ val.Set(elem)
+ return nil
+}
+
+// EncodeValue is the ValueEncoderFunc for time.Time.
+func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ if !val.IsValid() || val.Type() != tTime {
+ return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+ }
+ tt := val.Interface().(time.Time)
+ dt := primitive.NewDateTimeFromTime(tt)
+ return vw.WriteDateTime(int64(dt))
+}
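+
+// A round-trip sketch of the encode/decode pair above (assuming the standard
+// bson entry points): time.Time values are stored as BSON DateTime with
+// millisecond precision and, unless UseLocalTimeZone is enabled, decode back
+// in UTC:
+//
+//	in := time.Date(2024, 1, 2, 3, 4, 5, 123456789, time.Local)
+//	// after a Marshal/Unmarshal round trip, the decoded value equals
+//	// in.Truncate(time.Millisecond).UTC()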
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
new file mode 100644
index 00000000000..6ade17b7d3f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
@@ -0,0 +1,58 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "encoding/json"
+ "net/url"
+ "reflect"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson/primitive"
+ "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var tBool = reflect.TypeOf(false)
+var tFloat64 = reflect.TypeOf(float64(0))
+var tInt32 = reflect.TypeOf(int32(0))
+var tInt64 = reflect.TypeOf(int64(0))
+var tString = reflect.TypeOf("")
+var tTime = reflect.TypeOf(time.Time{})
+
+var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
+var tByteSlice = reflect.TypeOf([]byte(nil))
+var tByte = reflect.TypeOf(byte(0x00))
+var tURL = reflect.TypeOf(url.URL{})
+var tJSONNumber = reflect.TypeOf(json.Number(""))
+
+var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem()
+var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem()
+var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
+var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem()
+var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem()
+
+var tBinary = reflect.TypeOf(primitive.Binary{})
+var tUndefined = reflect.TypeOf(primitive.Undefined{})
+var tOID = reflect.TypeOf(primitive.ObjectID{})
+var tDateTime = reflect.TypeOf(primitive.DateTime(0))
+var tNull = reflect.TypeOf(primitive.Null{})
+var tRegex = reflect.TypeOf(primitive.Regex{})
+var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{})
+var tDBPointer = reflect.TypeOf(primitive.DBPointer{})
+var tJavaScript = reflect.TypeOf(primitive.JavaScript(""))
+var tSymbol = reflect.TypeOf(primitive.Symbol(""))
+var tTimestamp = reflect.TypeOf(primitive.Timestamp{})
+var tDecimal = reflect.TypeOf(primitive.Decimal128{})
+var tMinKey = reflect.TypeOf(primitive.MinKey{})
+var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
+var tD = reflect.TypeOf(primitive.D{})
+var tA = reflect.TypeOf(primitive.A{})
+var tE = reflect.TypeOf(primitive.E{})
+
+var tCoreDocument = reflect.TypeOf(bsoncore.Document{})
+var tCoreArray = reflect.TypeOf(bsoncore.Array{})
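
The `reflect.TypeOf((*I)(nil)).Elem()` pattern used for the interface types above deserves a note: reflect.TypeOf cannot be handed an interface type directly, so a typed nil pointer is passed and Elem() unwraps it to the interface type itself. A self-contained sketch, where Zeroer is a stand-in for the driver's interface of the same name:

```go
package main

import (
	"fmt"
	"reflect"
)

// Zeroer stands in for the driver's bsoncodec.Zeroer interface.
type Zeroer interface{ IsZero() bool }

func main() {
	// reflect.TypeOf((*Zeroer)(nil)) describes the pointer type *Zeroer;
	// Elem() unwraps it to the interface type Zeroer.
	tZeroer := reflect.TypeOf((*Zeroer)(nil)).Elem()

	fmt.Println(tZeroer)        // main.Zeroer
	fmt.Println(tZeroer.Kind()) // interface
}
```
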
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go
new file mode 100644
index 00000000000..39b07135b18
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go
@@ -0,0 +1,202 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+
+ "go.mongodb.org/mongo-driver/bson/bsonoptions"
+ "go.mongodb.org/mongo-driver/bson/bsonrw"
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// UIntCodec is the Codec used for uint values.
+//
+// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To
+// configure the uint encode and decode behavior, use the configuration methods
+// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and
+// decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to marshal Go uint values as the
+// minimum BSON int size that can represent the value, use:
+//
+// opt := options.Client().SetBSONOptions(&options.BSONOptions{
+// IntMinSize: true,
+// })
+//
+// See the deprecation notice for each field in UIntCodec for the corresponding
+// settings.
+type UIntCodec struct {
+ // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the
+ // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value.
+ //
+ // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead.
+ EncodeToMinSize bool
+}
+
+var (
+ defaultUIntCodec = NewUIntCodec()
+
+ // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used
+ // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
+ _ typeDecoder = defaultUIntCodec
+)
+
+// NewUIntCodec returns a UIntCodec with options opts.
+//
+// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See
+// [UIntCodec] for more details.
+func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec {
+ uintOpt := bsonoptions.MergeUIntCodecOptions(opts...)
+
+ codec := UIntCodec{}
+ if uintOpt.EncodeToMinSize != nil {
+ codec.EncodeToMinSize = *uintOpt.EncodeToMinSize
+ }
+ return &codec
+}
+
+// EncodeValue is the ValueEncoder for uint types.
+func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+ switch val.Kind() {
+ case reflect.Uint8, reflect.Uint16:
+ return vw.WriteInt32(int32(val.Uint()))
+ case reflect.Uint, reflect.Uint32, reflect.Uint64:
+ u64 := val.Uint()
+
+		// If ec.MinSize is set, or EncodeToMinSize is true and the value is not a uint64, write val as an int32 when it fits.
+ useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64)
+
+ if u64 <= math.MaxInt32 && useMinSize {
+ return vw.WriteInt32(int32(u64))
+ }
+ if u64 > math.MaxInt64 {
+ return fmt.Errorf("%d overflows int64", u64)
+ }
+ return vw.WriteInt64(int64(u64))
+ }
+
+ return ValueEncoderError{
+ Name: "UintEncodeValue",
+ Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+ Received: val,
+ }
+}
+
+func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+ var i64 int64
+ var err error
+ switch vrType := vr.Type(); vrType {
+ case bsontype.Int32:
+ i32, err := vr.ReadInt32()
+ if err != nil {
+ return emptyValue, err
+ }
+ i64 = int64(i32)
+ case bsontype.Int64:
+ i64, err = vr.ReadInt64()
+ if err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Double:
+ f64, err := vr.ReadDouble()
+ if err != nil {
+ return emptyValue, err
+ }
+ if !dc.Truncate && math.Floor(f64) != f64 {
+ return emptyValue, errCannotTruncate
+ }
+ if f64 > float64(math.MaxInt64) {
+ return emptyValue, fmt.Errorf("%g overflows int64", f64)
+ }
+ i64 = int64(f64)
+ case bsontype.Boolean:
+ b, err := vr.ReadBoolean()
+ if err != nil {
+ return emptyValue, err
+ }
+ if b {
+ i64 = 1
+ }
+ case bsontype.Null:
+ if err = vr.ReadNull(); err != nil {
+ return emptyValue, err
+ }
+ case bsontype.Undefined:
+ if err = vr.ReadUndefined(); err != nil {
+ return emptyValue, err
+ }
+ default:
+ return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType)
+ }
+
+ switch t.Kind() {
+ case reflect.Uint8:
+ if i64 < 0 || i64 > math.MaxUint8 {
+ return emptyValue, fmt.Errorf("%d overflows uint8", i64)
+ }
+
+ return reflect.ValueOf(uint8(i64)), nil
+ case reflect.Uint16:
+ if i64 < 0 || i64 > math.MaxUint16 {
+ return emptyValue, fmt.Errorf("%d overflows uint16", i64)
+ }
+
+ return reflect.ValueOf(uint16(i64)), nil
+ case reflect.Uint32:
+ if i64 < 0 || i64 > math.MaxUint32 {
+ return emptyValue, fmt.Errorf("%d overflows uint32", i64)
+ }
+
+ return reflect.ValueOf(uint32(i64)), nil
+ case reflect.Uint64:
+ if i64 < 0 {
+ return emptyValue, fmt.Errorf("%d overflows uint64", i64)
+ }
+
+ return reflect.ValueOf(uint64(i64)), nil
+ case reflect.Uint:
+ if i64 < 0 {
+ return emptyValue, fmt.Errorf("%d overflows uint", i64)
+ }
+ v := uint64(i64)
+		if v > math.MaxUint { // reject values that do not fit in a platform-sized uint
+ return emptyValue, fmt.Errorf("%d overflows uint", i64)
+ }
+
+ return reflect.ValueOf(uint(v)), nil
+ default:
+ return emptyValue, ValueDecoderError{
+ Name: "UintDecodeValue",
+ Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+ Received: reflect.Zero(t),
+ }
+ }
+}
+
+// DecodeValue is the ValueDecoder for uint types.
+func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+ if !val.CanSet() {
+ return ValueDecoderError{
+ Name: "UintDecodeValue",
+ Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+ Received: val,
+ }
+ }
+
+ elem, err := uic.decodeType(dc, vr, val.Type())
+ if err != nil {
+ return err
+ }
+
+ val.SetUint(elem.Uint())
+ return nil
+}
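
The width selection in EncodeValue above reduces to two checks: values that fit in an int32 are written as int32 when minimum-size encoding applies, everything else goes to int64, and values above math.MaxInt64 cannot be represented as a BSON integer at all (uint8 and uint16 always fit in int32). A standalone sketch of just that branch logic, using a hypothetical chooseWidth helper rather than a ValueWriter:

```go
package main

import (
	"fmt"
	"math"
)

// chooseWidth mirrors the branch above: small values go to int32 when
// minimum-size encoding applies, everything representable goes to int64,
// and values above math.MaxInt64 overflow.
func chooseWidth(u64 uint64, minSize bool) (string, error) {
	if u64 <= math.MaxInt32 && minSize {
		return "int32", nil
	}
	if u64 > math.MaxInt64 {
		return "", fmt.Errorf("%d overflows int64", u64)
	}
	return "int64", nil
}

func main() {
	fmt.Println(chooseWidth(42, true))                      // int32 <nil>
	fmt.Println(chooseWidth(42, false))                     // int64 <nil>
	fmt.Println(chooseWidth(uint64(math.MaxUint64), false)) // overflow error
}
```
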
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go
new file mode 100644
index 00000000000..996bd17127a
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type ByteSliceCodecOptions struct {
+ EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false.
+}
+
+// ByteSliceCodec creates a new *ByteSliceCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func ByteSliceCodec() *ByteSliceCodecOptions {
+ return &ByteSliceCodecOptions{}
+}
+
+// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead.
+func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions {
+ bs.EncodeNilAsEmpty = &b
+ return bs
+}
+
+// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions {
+ bs := ByteSliceCodec()
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ if opt.EncodeNilAsEmpty != nil {
+ bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
+ }
+ }
+
+ return bs
+}
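
As a usage sketch (assuming the deprecated v1 constructor bsoncodec.NewByteSliceCodec, which consumes these options), the setter and merge functions above compose like this:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

func main() {
	// Last-one-wins merging: the later options value overrides the earlier.
	merged := bsonoptions.MergeByteSliceCodecOptions(
		bsonoptions.ByteSliceCodec().SetEncodeNilAsEmpty(false),
		bsonoptions.ByteSliceCodec().SetEncodeNilAsEmpty(true),
	)
	fmt.Println(*merged.EncodeNilAsEmpty) // true

	// The merged options configure the codec: a nil []byte now encodes as
	// an empty binary value rather than BSON null.
	codec := bsoncodec.NewByteSliceCodec(merged)
	_ = codec
}
```
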
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
new file mode 100644
index 00000000000..c40973c8d43
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
@@ -0,0 +1,8 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonoptions defines the optional configurations for the BSON codecs.
+package bsonoptions
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
new file mode 100644
index 00000000000..f522c7e03fe
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type EmptyInterfaceCodecOptions struct {
+	DecodeBinaryAsSlice *bool // Specifies if binary values with the Old or Generic subtype should decode as []byte instead of primitive.Binary. Defaults to false.
+}
+
+// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions {
+ return &EmptyInterfaceCodecOptions{}
+}
+
+// SetDecodeBinaryAsSlice specifies if binary values with the Old or Generic subtype should decode as []byte instead of primitive.Binary. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
+func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions {
+ e.DecodeBinaryAsSlice = &b
+ return e
+}
+
+// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions {
+ e := EmptyInterfaceCodec()
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ if opt.DecodeBinaryAsSlice != nil {
+ e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice
+ }
+ }
+
+ return e
+}
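
A sketch of the behavior this option toggles, assuming the v1 driver's public bson package: binary values decoded into interface{} surface as primitive.Binary by default, and as []byte for the Old and Generic subtypes when the option is enabled.

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	doc, err := bson.Marshal(bson.M{"data": primitive.Binary{Subtype: 0x00, Data: []byte{1, 2, 3}}})
	if err != nil {
		panic(err)
	}

	var out bson.M
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}

	// Default: generic-subtype binary surfaces as primitive.Binary. With
	// DecodeBinaryAsSlice (bson.Decoder.BinaryAsSlice in the newer API) it
	// would surface as []byte instead.
	fmt.Printf("%T\n", out["data"]) // primitive.Binary
}
```
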
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go
new file mode 100644
index 00000000000..a7a7c1d9804
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go
@@ -0,0 +1,82 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// MapCodecOptions represents all possible options for map encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type MapCodecOptions struct {
+ DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false.
+ EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false.
+ // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must
+ // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a
+ // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the
+	// encoding key type must be a string, an integer type, or a float; in that case, the use of Stringer
+	// overrides TextMarshaler/TextUnmarshaler. Defaults to false.
+ EncodeKeysWithStringer *bool
+}
+
+// MapCodec creates a new *MapCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func MapCodec() *MapCodecOptions {
+ return &MapCodecOptions{}
+}
+
+// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
+func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions {
+ t.DecodeZerosMap = &b
+ return t
+}
+
+// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead.
+func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions {
+ t.EncodeNilAsEmpty = &b
+ return t
+}
+
+// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the
+// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key
+// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with
+// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float; in that case, the use of
+// Stringer overrides TextMarshaler/TextUnmarshaler. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead.
+func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions {
+ t.EncodeKeysWithStringer = &b
+ return t
+}
+
+// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions {
+ s := MapCodec()
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ if opt.DecodeZerosMap != nil {
+ s.DecodeZerosMap = opt.DecodeZerosMap
+ }
+ if opt.EncodeNilAsEmpty != nil {
+ s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
+ }
+ if opt.EncodeKeysWithStringer != nil {
+ s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer
+ }
+ }
+
+ return s
+}
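
A brief usage sketch, assuming the deprecated v1 constructor bsoncodec.NewMapCodec that consumes these options:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

func main() {
	mapOpts := bsonoptions.MapCodec().
		SetDecodeZerosMap(true).        // clear existing entries before decoding
		SetEncodeKeysWithStringer(true) // stringify keys with fmt.Sprint

	codec := bsoncodec.NewMapCodec(mapOpts)
	fmt.Printf("%T\n", codec) // *bsoncodec.MapCodec
}
```
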
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go
new file mode 100644
index 00000000000..3c1e4f35ba1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// SliceCodecOptions represents all possible options for slice encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type SliceCodecOptions struct {
+ EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false.
+}
+
+// SliceCodec creates a new *SliceCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func SliceCodec() *SliceCodecOptions {
+ return &SliceCodecOptions{}
+}
+
+// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead.
+func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions {
+ s.EncodeNilAsEmpty = &b
+ return s
+}
+
+// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions {
+ s := SliceCodec()
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ if opt.EncodeNilAsEmpty != nil {
+ s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
+ }
+ }
+
+ return s
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go
new file mode 100644
index 00000000000..f8b76f996e4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go
@@ -0,0 +1,52 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+var defaultDecodeOIDAsHex = true
+
+// StringCodecOptions represents all possible options for string encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type StringCodecOptions struct {
+ DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true.
+}
+
+// StringCodec creates a new *StringCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func StringCodec() *StringCodecOptions {
+ return &StringCodecOptions{}
+}
+
+// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made
+// from the raw object ID bytes will be used. Defaults to true.
+//
+// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
+func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions {
+ t.DecodeObjectIDAsHex = &b
+ return t
+}
+
+// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions {
+ s := &StringCodecOptions{&defaultDecodeOIDAsHex}
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ if opt.DecodeObjectIDAsHex != nil {
+ s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex
+ }
+ }
+
+ return s
+}
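
For the option above, the two decoded string forms look like this (a standalone sketch using the v1 primitive package):

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	oid := primitive.NewObjectID()

	// DecodeObjectIDAsHex true (the default): the 24-character hex form.
	fmt.Println(oid.Hex())

	// DecodeObjectIDAsHex false: a string built from the raw 12 bytes,
	// which is generally not printable.
	fmt.Printf("%q\n", string(oid[:]))
}
```
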
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
new file mode 100644
index 00000000000..1cbfa32e8b4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
@@ -0,0 +1,107 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+var defaultOverwriteDuplicatedInlinedFields = true
+
+// StructCodecOptions represents all possible options for struct encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type StructCodecOptions struct {
+ DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false.
+	DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
+ EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false.
+ AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+ OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true.
+}
+
+// StructCodec creates a new *StructCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func StructCodec() *StructCodecOptions {
+ return &StructCodecOptions{}
+}
+
+// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
+func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions {
+ t.DecodeZeroStruct = &b
+ return t
+}
+
+// SetDecodeDeepZeroInline specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
+//
+// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
+func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions {
+ t.DecodeDeepZeroInline = &b
+ return t
+}
+
+// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all
+// its values set to their default value. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
+func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions {
+ t.EncodeOmitDefaultStruct = &b
+ return t
+}
+
+// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the
+// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when
+// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if
+// there are duplicate keys after the struct is inlined. Defaults to true.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
+func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions {
+ t.OverwriteDuplicatedInlinedFields = &b
+ return t
+}
+
+// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+//
+// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
+// supported in Go Driver 2.0.
+func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
+ t.AllowUnexportedFields = &b
+ return t
+}
+
+// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions {
+ s := &StructCodecOptions{
+ OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields,
+ }
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+
+ if opt.DecodeZeroStruct != nil {
+ s.DecodeZeroStruct = opt.DecodeZeroStruct
+ }
+ if opt.DecodeDeepZeroInline != nil {
+ s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline
+ }
+ if opt.EncodeOmitDefaultStruct != nil {
+ s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct
+ }
+ if opt.OverwriteDuplicatedInlinedFields != nil {
+ s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields
+ }
+ if opt.AllowUnexportedFields != nil {
+ s.AllowUnexportedFields = opt.AllowUnexportedFields
+ }
+ }
+
+ return s
+}
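
A sketch of the duplicate-key case OverwriteDuplicatedInlinedFields governs, assuming the v1 driver's default registry (where the option defaults to true): Inner is inlined into Outer and both map to the key "name".

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type Inner struct {
	Name string `bson:"name"`
}

type Outer struct {
	Inner `bson:",inline"`
	Name  string `bson:"name"` // duplicates the inlined key
}

func main() {
	raw, err := bson.Marshal(Outer{Inner: Inner{Name: "inner"}, Name: "outer"})
	if err != nil {
		panic(err)
	}

	// Under the default (true) the top-level field wins; with the option
	// set to false the struct codec reports the duplicate key as an error.
	fmt.Println(bson.Raw(raw).Lookup("name").StringValue()) // outer
}
```
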
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go
new file mode 100644
index 00000000000..3f38433d226
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// TimeCodecOptions represents all possible options for time.Time encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type TimeCodecOptions struct {
+ UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false.
+}
+
+// TimeCodec creates a new *TimeCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func TimeCodec() *TimeCodecOptions {
+ return &TimeCodecOptions{}
+}
+
+// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
+func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions {
+ t.UseLocalTimeZone = &b
+ return t
+}
+
+// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions {
+ t := TimeCodec()
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ if opt.UseLocalTimeZone != nil {
+ t.UseLocalTimeZone = opt.UseLocalTimeZone
+ }
+ }
+
+ return t
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go
new file mode 100644
index 00000000000..5091e4d9633
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// UIntCodecOptions represents all possible options for uint encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type UIntCodecOptions struct {
+	EncodeToMinSize *bool // Specifies if all uints except uint64 should be encoded as the minimum-size BSON integer type. Defaults to false.
+}
+
+// UIntCodec creates a new *UIntCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func UIntCodec() *UIntCodecOptions {
+ return &UIntCodecOptions{}
+}
+
+// SetEncodeToMinSize specifies if all uints except uint64 should be encoded as the minimum-size BSON integer type. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead.
+func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions {
+ u.EncodeToMinSize = &b
+ return u
+}
+
+// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions {
+ u := UIntCodec()
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ if opt.EncodeToMinSize != nil {
+ u.EncodeToMinSize = opt.EncodeToMinSize
+ }
+ }
+
+ return u
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
new file mode 100644
index 00000000000..1e25570b855
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
@@ -0,0 +1,489 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+ "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Copier is a type that allows copying between ValueReaders, ValueWriters, and
+// []byte values.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+type Copier struct{}
+
+// NewCopier creates a new Copier.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func NewCopier() Copier {
+ return Copier{}
+}
+
+// CopyDocument handles copying a document from src to dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func CopyDocument(dst ValueWriter, src ValueReader) error {
+ return Copier{}.CopyDocument(dst, src)
+}
+
+// CopyDocument handles copying one document from the src to the dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
+ dr, err := src.ReadDocument()
+ if err != nil {
+ return err
+ }
+
+ dw, err := dst.WriteDocument()
+ if err != nil {
+ return err
+ }
+
+ return c.copyDocumentCore(dw, dr)
+}
+
+// CopyArrayFromBytes copies the values from a BSON array represented as a
+// []byte to a ValueWriter.
+//
+// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error {
+ aw, err := dst.WriteArray()
+ if err != nil {
+ return err
+ }
+
+ err = c.CopyBytesToArrayWriter(aw, src)
+ if err != nil {
+ return err
+ }
+
+ return aw.WriteArrayEnd()
+}
+
+// CopyDocumentFromBytes copies the values from a BSON document represented as a
+// []byte to a ValueWriter.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
+ dw, err := dst.WriteDocument()
+ if err != nil {
+ return err
+ }
+
+ err = c.CopyBytesToDocumentWriter(dw, src)
+ if err != nil {
+ return err
+ }
+
+ return dw.WriteDocumentEnd()
+}
+
+type writeElementFn func(key string) (ValueWriter, error)
+
+// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an
+// ArrayWriter.
+//
+// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go
+// Driver 2.0.
+func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error {
+ wef := func(_ string) (ValueWriter, error) {
+ return dst.WriteArrayElement()
+ }
+
+ return c.copyBytesToValueWriter(src, wef)
+}
+
+// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a
+// DocumentWriter.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error {
+ wef := func(key string) (ValueWriter, error) {
+ return dst.WriteDocumentElement(key)
+ }
+
+ return c.copyBytesToValueWriter(src, wef)
+}
+
+func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error {
+ // TODO(skriptble): Create errors types here. Anything that is a tag should be a property.
+ length, rem, ok := bsoncore.ReadLength(src)
+ if !ok {
+ return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src))
+ }
+ if len(src) < int(length) {
+		return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", length, len(src))
+ }
+ rem = rem[:length-4]
+
+ var t bsontype.Type
+ var key string
+ var val bsoncore.Value
+ for {
+ t, rem, ok = bsoncore.ReadType(rem)
+ if !ok {
+ return io.EOF
+ }
+ if t == bsontype.Type(0) {
+ if len(rem) != 0 {
+ return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem)
+ }
+ break
+ }
+
+ key, rem, ok = bsoncore.ReadKey(rem)
+ if !ok {
+ return fmt.Errorf("invalid key found. remaining bytes=%v", rem)
+ }
+
+ // write as either array element or document element using writeElementFn
+ vw, err := wef(key)
+ if err != nil {
+ return err
+ }
+
+ val, rem, ok = bsoncore.ReadValue(rem, t)
+ if !ok {
+			return fmt.Errorf("not enough bytes available to read value. bytes=%d type=%s", len(rem), t)
+ }
+ err = c.CopyValueFromBytes(vw, t, val.Data)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CopyDocumentToBytes copies an entire document from the ValueReader and
+// returns it as bytes.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) {
+ return c.AppendDocumentBytes(nil, src)
+}
+
+// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will
+// append the result to dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) {
+ if br, ok := src.(BytesReader); ok {
+ _, dst, err := br.ReadValueBytes(dst)
+ return dst, err
+ }
+
+ vw := vwPool.Get().(*valueWriter)
+ defer putValueWriter(vw)
+
+ vw.reset(dst)
+
+ err := c.CopyDocument(vw, src)
+ dst = vw.buf
+ return dst, err
+}
+
+// AppendArrayBytes copies an array from the ValueReader to dst.
+//
+// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) {
+ if br, ok := src.(BytesReader); ok {
+ _, dst, err := br.ReadValueBytes(dst)
+ return dst, err
+ }
+
+ vw := vwPool.Get().(*valueWriter)
+ defer putValueWriter(vw)
+
+ vw.reset(dst)
+
+ err := c.copyArray(vw, src)
+ dst = vw.buf
+ return dst, err
+}
+
+// CopyValueFromBytes will write the value represented by t and src to dst.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead.
+func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error {
+ if wvb, ok := dst.(BytesWriter); ok {
+ return wvb.WriteValueBytes(t, src)
+ }
+
+ vr := vrPool.Get().(*valueReader)
+ defer vrPool.Put(vr)
+
+ vr.reset(src)
+ vr.pushElement(t)
+
+ return c.CopyValue(dst, vr)
+}
+
+// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a
+// []byte.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead.
+func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) {
+ return c.AppendValueBytes(nil, src)
+}
+
+// AppendValueBytes functions the same as CopyValueToBytes, but will append the
+// result to dst.
+//
+// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
+// Driver 2.0.
+func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) {
+ if br, ok := src.(BytesReader); ok {
+ return br.ReadValueBytes(dst)
+ }
+
+ vw := vwPool.Get().(*valueWriter)
+ defer putValueWriter(vw)
+
+ start := len(dst)
+
+ vw.reset(dst)
+ vw.push(mElement)
+
+ err := c.CopyValue(vw, src)
+ if err != nil {
+ return 0, dst, err
+ }
+
+ return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil
+}
+
+// CopyValue will copy a single value from src to dst.
+//
+// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error {
+ var err error
+ switch src.Type() {
+ case bsontype.Double:
+ var f64 float64
+ f64, err = src.ReadDouble()
+ if err != nil {
+ break
+ }
+ err = dst.WriteDouble(f64)
+ case bsontype.String:
+ var str string
+ str, err = src.ReadString()
+ if err != nil {
+ return err
+ }
+ err = dst.WriteString(str)
+ case bsontype.EmbeddedDocument:
+ err = c.CopyDocument(dst, src)
+ case bsontype.Array:
+ err = c.copyArray(dst, src)
+ case bsontype.Binary:
+ var data []byte
+ var subtype byte
+ data, subtype, err = src.ReadBinary()
+ if err != nil {
+ break
+ }
+ err = dst.WriteBinaryWithSubtype(data, subtype)
+ case bsontype.Undefined:
+ err = src.ReadUndefined()
+ if err != nil {
+ break
+ }
+ err = dst.WriteUndefined()
+ case bsontype.ObjectID:
+ var oid primitive.ObjectID
+ oid, err = src.ReadObjectID()
+ if err != nil {
+ break
+ }
+ err = dst.WriteObjectID(oid)
+ case bsontype.Boolean:
+ var b bool
+ b, err = src.ReadBoolean()
+ if err != nil {
+ break
+ }
+ err = dst.WriteBoolean(b)
+ case bsontype.DateTime:
+ var dt int64
+ dt, err = src.ReadDateTime()
+ if err != nil {
+ break
+ }
+ err = dst.WriteDateTime(dt)
+ case bsontype.Null:
+ err = src.ReadNull()
+ if err != nil {
+ break
+ }
+ err = dst.WriteNull()
+ case bsontype.Regex:
+ var pattern, options string
+ pattern, options, err = src.ReadRegex()
+ if err != nil {
+ break
+ }
+ err = dst.WriteRegex(pattern, options)
+ case bsontype.DBPointer:
+ var ns string
+ var pointer primitive.ObjectID
+ ns, pointer, err = src.ReadDBPointer()
+ if err != nil {
+ break
+ }
+ err = dst.WriteDBPointer(ns, pointer)
+ case bsontype.JavaScript:
+ var js string
+ js, err = src.ReadJavascript()
+ if err != nil {
+ break
+ }
+ err = dst.WriteJavascript(js)
+ case bsontype.Symbol:
+ var symbol string
+ symbol, err = src.ReadSymbol()
+ if err != nil {
+ break
+ }
+ err = dst.WriteSymbol(symbol)
+ case bsontype.CodeWithScope:
+ var code string
+ var srcScope DocumentReader
+ code, srcScope, err = src.ReadCodeWithScope()
+ if err != nil {
+ break
+ }
+
+ var dstScope DocumentWriter
+ dstScope, err = dst.WriteCodeWithScope(code)
+ if err != nil {
+ break
+ }
+ err = c.copyDocumentCore(dstScope, srcScope)
+ case bsontype.Int32:
+ var i32 int32
+ i32, err = src.ReadInt32()
+ if err != nil {
+ break
+ }
+ err = dst.WriteInt32(i32)
+ case bsontype.Timestamp:
+ var t, i uint32
+ t, i, err = src.ReadTimestamp()
+ if err != nil {
+ break
+ }
+ err = dst.WriteTimestamp(t, i)
+ case bsontype.Int64:
+ var i64 int64
+ i64, err = src.ReadInt64()
+ if err != nil {
+ break
+ }
+ err = dst.WriteInt64(i64)
+ case bsontype.Decimal128:
+ var d128 primitive.Decimal128
+ d128, err = src.ReadDecimal128()
+ if err != nil {
+ break
+ }
+ err = dst.WriteDecimal128(d128)
+ case bsontype.MinKey:
+ err = src.ReadMinKey()
+ if err != nil {
+ break
+ }
+ err = dst.WriteMinKey()
+ case bsontype.MaxKey:
+ err = src.ReadMaxKey()
+ if err != nil {
+ break
+ }
+ err = dst.WriteMaxKey()
+ default:
+		err = fmt.Errorf("cannot copy unknown BSON type %s", src.Type())
+ }
+
+ return err
+}
+
+func (c Copier) copyArray(dst ValueWriter, src ValueReader) error {
+ ar, err := src.ReadArray()
+ if err != nil {
+ return err
+ }
+
+ aw, err := dst.WriteArray()
+ if err != nil {
+ return err
+ }
+
+ for {
+ vr, err := ar.ReadValue()
+ if errors.Is(err, ErrEOA) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ vw, err := aw.WriteArrayElement()
+ if err != nil {
+ return err
+ }
+
+ err = c.CopyValue(vw, vr)
+ if err != nil {
+ return err
+ }
+ }
+
+ return aw.WriteArrayEnd()
+}
+
+func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error {
+ for {
+ key, vr, err := dr.ReadElement()
+ if errors.Is(err, ErrEOD) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ vw, err := dw.WriteDocumentElement(key)
+ if err != nil {
+ return err
+ }
+
+ err = c.CopyValue(vw, vr)
+ if err != nil {
+ return err
+ }
+ }
+
+ return dw.WriteDocumentEnd()
+}
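
A usage sketch of the deprecated Copier API above, assuming bsonrw.NewBSONDocumentReader from the same package: round-trip a marshaled document through a ValueReader and back into bytes.

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	src, err := bson.Marshal(bson.M{"hello": "world"})
	if err != nil {
		panic(err)
	}

	// Wrap the raw bytes in a ValueReader and copy the document back out.
	vr := bsonrw.NewBSONDocumentReader(src)
	dst, err := bsonrw.NewCopier().CopyDocumentToBytes(vr)
	if err != nil {
		panic(err)
	}

	fmt.Println(bson.Raw(dst)) // {"hello": "world"}
}
```
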
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
new file mode 100644
index 00000000000..750b0d2af51
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
@@ -0,0 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonrw contains abstractions for reading and writing
+// BSON and BSON-like types from sources.
+package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw"
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
new file mode 100644
index 00000000000..f0702d9d302
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
@@ -0,0 +1,806 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+const maxNestingDepth = 200
+
+// ErrInvalidJSON indicates the JSON input is invalid
+var ErrInvalidJSON = errors.New("invalid JSON input")
+
+type jsonParseState byte
+
+const (
+ jpsStartState jsonParseState = iota
+ jpsSawBeginObject
+ jpsSawEndObject
+ jpsSawBeginArray
+ jpsSawEndArray
+ jpsSawColon
+ jpsSawComma
+ jpsSawKey
+ jpsSawValue
+ jpsDoneState
+ jpsInvalidState
+)
+
+type jsonParseMode byte
+
+const (
+ jpmInvalidMode jsonParseMode = iota
+ jpmObjectMode
+ jpmArrayMode
+)
+
+type extJSONValue struct {
+ t bsontype.Type
+ v interface{}
+}
+
+type extJSONObject struct {
+ keys []string
+ values []*extJSONValue
+}
+
+type extJSONParser struct {
+ js *jsonScanner
+ s jsonParseState
+ m []jsonParseMode
+ k string
+ v *extJSONValue
+
+ err error
+ canonical bool
+ depth int
+ maxDepth int
+
+ emptyObject bool
+ relaxedUUID bool
+}
+
+// newExtJSONParser returns a new extended JSON parser, ready to begin
+// parsing from the first character of the given JSON input. It will not
+// perform any read-ahead and will therefore not report any errors about
+// malformed JSON at this point.
+func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser {
+ return &extJSONParser{
+ js: &jsonScanner{r: r},
+ s: jpsStartState,
+ m: []jsonParseMode{},
+ canonical: canonical,
+ maxDepth: maxNestingDepth,
+ }
+}
+
+// peekType examines the next value and returns its BSON Type
+func (ejp *extJSONParser) peekType() (bsontype.Type, error) {
+ var t bsontype.Type
+ var err error
+ initialState := ejp.s
+
+ ejp.advanceState()
+ switch ejp.s {
+ case jpsSawValue:
+ t = ejp.v.t
+ case jpsSawBeginArray:
+ t = bsontype.Array
+ case jpsInvalidState:
+ err = ejp.err
+ case jpsSawComma:
+ // in array mode, seeing a comma means we need to progress again to actually observe a type
+ if ejp.peekMode() == jpmArrayMode {
+ return ejp.peekType()
+ }
+ case jpsSawEndArray:
+ // this would only be a valid state if we were in array mode, so return end-of-array error
+ err = ErrEOA
+ case jpsSawBeginObject:
+ // peek key to determine type
+ ejp.advanceState()
+ switch ejp.s {
+ case jpsSawEndObject: // empty embedded document
+ t = bsontype.EmbeddedDocument
+ ejp.emptyObject = true
+ case jpsInvalidState:
+ err = ejp.err
+ case jpsSawKey:
+ if initialState == jpsStartState {
+ return bsontype.EmbeddedDocument, nil
+ }
+ t = wrapperKeyBSONType(ejp.k)
+
+ // if $uuid is encountered, parse as binary subtype 4
+ if ejp.k == "$uuid" {
+ ejp.relaxedUUID = true
+ t = bsontype.Binary
+ }
+
+ switch t {
+ case bsontype.JavaScript:
+ // just saw $code, need to check for $scope at same level
+ _, err = ejp.readValue(bsontype.JavaScript)
+ if err != nil {
+ break
+ }
+
+ switch ejp.s {
+ case jpsSawEndObject: // type is TypeJavaScript
+ case jpsSawComma:
+ ejp.advanceState()
+
+ if ejp.s == jpsSawKey && ejp.k == "$scope" {
+ t = bsontype.CodeWithScope
+ } else {
+ err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k)
+ }
+ case jpsInvalidState:
+ err = ejp.err
+ default:
+ err = ErrInvalidJSON
+ }
+ case bsontype.CodeWithScope:
+ err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope")
+ }
+ }
+ }
+
+ return t, err
+}
+
+// readKey parses the next key and its type and returns them
+func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) {
+ if ejp.emptyObject {
+ ejp.emptyObject = false
+ return "", 0, ErrEOD
+ }
+
+ // advance to key (or return with error)
+ switch ejp.s {
+ case jpsStartState:
+ ejp.advanceState()
+ if ejp.s == jpsSawBeginObject {
+ ejp.advanceState()
+ }
+ case jpsSawBeginObject:
+ ejp.advanceState()
+ case jpsSawValue, jpsSawEndObject, jpsSawEndArray:
+ ejp.advanceState()
+ switch ejp.s {
+ case jpsSawBeginObject, jpsSawComma:
+ ejp.advanceState()
+ case jpsSawEndObject:
+ return "", 0, ErrEOD
+ case jpsDoneState:
+ return "", 0, io.EOF
+ case jpsInvalidState:
+ return "", 0, ejp.err
+ default:
+ return "", 0, ErrInvalidJSON
+ }
+ case jpsSawKey: // do nothing (key was peeked before)
+ default:
+ return "", 0, invalidRequestError("key")
+ }
+
+ // read key
+ var key string
+
+ switch ejp.s {
+ case jpsSawKey:
+ key = ejp.k
+ case jpsSawEndObject:
+ return "", 0, ErrEOD
+ case jpsInvalidState:
+ return "", 0, ejp.err
+ default:
+ return "", 0, invalidRequestError("key")
+ }
+
+ // check for colon
+ ejp.advanceState()
+ if err := ensureColon(ejp.s, key); err != nil {
+ return "", 0, err
+ }
+
+ // peek at the value to determine type
+ t, err := ejp.peekType()
+ if err != nil {
+ return "", 0, err
+ }
+
+ return key, t, nil
+}
+
+// readValue returns the value corresponding to the Type returned by peekType
+func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
+ if ejp.s == jpsInvalidState {
+ return nil, ejp.err
+ }
+
+ var v *extJSONValue
+
+ switch t {
+ case bsontype.Null, bsontype.Boolean, bsontype.String:
+ if ejp.s != jpsSawValue {
+ return nil, invalidRequestError(t.String())
+ }
+ v = ejp.v
+ case bsontype.Int32, bsontype.Int64, bsontype.Double:
+ // relaxed version allows these to be literal number values
+ if ejp.s == jpsSawValue {
+ v = ejp.v
+ break
+ }
+ fallthrough
+ case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined:
+ switch ejp.s {
+ case jpsSawKey:
+ // read colon
+ ejp.advanceState()
+ if err := ensureColon(ejp.s, ejp.k); err != nil {
+ return nil, err
+ }
+
+ // read value
+ ejp.advanceState()
+ if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) {
+ return nil, invalidJSONErrorForType("value", t)
+ }
+
+ v = ejp.v
+
+ // read end object
+ ejp.advanceState()
+ if ejp.s != jpsSawEndObject {
+ return nil, invalidJSONErrorForType("} after value", t)
+ }
+ default:
+ return nil, invalidRequestError(t.String())
+ }
+ case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer:
+ if ejp.s != jpsSawKey {
+ return nil, invalidRequestError(t.String())
+ }
+ // read colon
+ ejp.advanceState()
+ if err := ensureColon(ejp.s, ejp.k); err != nil {
+ return nil, err
+ }
+
+ ejp.advanceState()
+ if t == bsontype.Binary && ejp.s == jpsSawValue {
+ // convert relaxed $uuid format
+ if ejp.relaxedUUID {
+ defer func() { ejp.relaxedUUID = false }()
+ uuid, err := ejp.v.parseSymbol()
+ if err != nil {
+ return nil, err
+ }
+
+ // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing
+ // in the 8th, 13th, 18th, and 23rd characters.
+ //
+ // See https://tools.ietf.org/html/rfc4122#section-3
+ valid := len(uuid) == 36 &&
+ string(uuid[8]) == "-" &&
+ string(uuid[13]) == "-" &&
+ string(uuid[18]) == "-" &&
+ string(uuid[23]) == "-"
+ if !valid {
+					return nil, errors.New("$uuid value does not follow RFC 4122 format regarding length and hyphens")
+ }
+
+ // remove hyphens
+ uuidNoHyphens := strings.ReplaceAll(uuid, "-", "")
+ if len(uuidNoHyphens) != 32 {
+					return nil, errors.New("$uuid value does not follow RFC 4122 format regarding length and hyphens")
+ }
+
+ // convert hex to bytes
+ bytes, err := hex.DecodeString(uuidNoHyphens)
+ if err != nil {
+ return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err)
+ }
+
+ ejp.advanceState()
+ if ejp.s != jpsSawEndObject {
+					return nil, invalidJSONErrorForType("$uuid value and then }", bsontype.Binary)
+ }
+
+ base64 := &extJSONValue{
+ t: bsontype.String,
+ v: base64.StdEncoding.EncodeToString(bytes),
+ }
+ subType := &extJSONValue{
+ t: bsontype.String,
+ v: "04",
+ }
+
+ v = &extJSONValue{
+ t: bsontype.EmbeddedDocument,
+ v: &extJSONObject{
+ keys: []string{"base64", "subType"},
+ values: []*extJSONValue{base64, subType},
+ },
+ }
+
+ break
+ }
+
+ // convert legacy $binary format
+ base64 := ejp.v
+
+ ejp.advanceState()
+ if ejp.s != jpsSawComma {
+ return nil, invalidJSONErrorForType(",", bsontype.Binary)
+ }
+
+ ejp.advanceState()
+ key, t, err := ejp.readKey()
+ if err != nil {
+ return nil, err
+ }
+ if key != "$type" {
+ return nil, invalidJSONErrorForType("$type", bsontype.Binary)
+ }
+
+ subType, err := ejp.readValue(t)
+ if err != nil {
+ return nil, err
+ }
+
+ ejp.advanceState()
+ if ejp.s != jpsSawEndObject {
+ return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary)
+ }
+
+ v = &extJSONValue{
+ t: bsontype.EmbeddedDocument,
+ v: &extJSONObject{
+ keys: []string{"base64", "subType"},
+ values: []*extJSONValue{base64, subType},
+ },
+ }
+ break
+ }
+
+ // read KV pairs
+ if ejp.s != jpsSawBeginObject {
+ return nil, invalidJSONErrorForType("{", t)
+ }
+
+ keys, vals, err := ejp.readObject(2, true)
+ if err != nil {
+ return nil, err
+ }
+
+ ejp.advanceState()
+ if ejp.s != jpsSawEndObject {
+ return nil, invalidJSONErrorForType("2 key-value pairs and then }", t)
+ }
+
+ v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+
+ case bsontype.DateTime:
+ switch ejp.s {
+ case jpsSawValue:
+ v = ejp.v
+ case jpsSawKey:
+ // read colon
+ ejp.advanceState()
+ if err := ensureColon(ejp.s, ejp.k); err != nil {
+ return nil, err
+ }
+
+ ejp.advanceState()
+ switch ejp.s {
+ case jpsSawBeginObject:
+ keys, vals, err := ejp.readObject(1, true)
+ if err != nil {
+ return nil, err
+ }
+ v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+ case jpsSawValue:
+ if ejp.canonical {
+ return nil, invalidJSONError("{")
+ }
+ v = ejp.v
+ default:
+ if ejp.canonical {
+ return nil, invalidJSONErrorForType("object", t)
+ }
+ return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t)
+ }
+
+ ejp.advanceState()
+ if ejp.s != jpsSawEndObject {
+ return nil, invalidJSONErrorForType("value and then }", t)
+ }
+ default:
+ return nil, invalidRequestError(t.String())
+ }
+ case bsontype.JavaScript:
+ switch ejp.s {
+ case jpsSawKey:
+ // read colon
+ ejp.advanceState()
+ if err := ensureColon(ejp.s, ejp.k); err != nil {
+ return nil, err
+ }
+
+ // read value
+ ejp.advanceState()
+ if ejp.s != jpsSawValue {
+ return nil, invalidJSONErrorForType("value", t)
+ }
+ v = ejp.v
+
+ // read end object or comma and just return
+ ejp.advanceState()
+ case jpsSawEndObject:
+ v = ejp.v
+ default:
+ return nil, invalidRequestError(t.String())
+ }
+ case bsontype.CodeWithScope:
+ if ejp.s == jpsSawKey && ejp.k == "$scope" {
+ v = ejp.v // this is the $code string from earlier
+
+ // read colon
+ ejp.advanceState()
+ if err := ensureColon(ejp.s, ejp.k); err != nil {
+ return nil, err
+ }
+
+ // read {
+ ejp.advanceState()
+ if ejp.s != jpsSawBeginObject {
+ return nil, invalidJSONError("$scope to be embedded document")
+ }
+ } else {
+ return nil, invalidRequestError(t.String())
+ }
+ case bsontype.EmbeddedDocument, bsontype.Array:
+ return nil, invalidRequestError(t.String())
+ }
+
+ return v, nil
+}
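
The two binary spellings readValue handles, the relaxed $uuid form and the legacy $binary/$type pair, can be exercised through the public entry point; a sketch assuming the v1 driver's bson.UnmarshalExtJSON:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	relaxed := []byte(`{"id": {"$uuid": "00112233-4455-6677-8899-aabbccddeeff"}}`)
	legacy := []byte(`{"id": {"$binary": "ABEiM0RVZneImaq7zN3u/w==", "$type": "04"}}`)

	var a, b bson.M
	fmt.Println(bson.UnmarshalExtJSON(relaxed, false, &a)) // <nil>
	fmt.Println(bson.UnmarshalExtJSON(legacy, false, &b))  // <nil>

	// Both spellings decode to the same subtype-4 primitive.Binary value.
	fmt.Printf("%v\n%v\n", a["id"], b["id"])
}
```
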
+
+// readObject is a utility method for reading full objects of known (or expected) size.
+// It is useful for extended JSON types such as binary, datetime, regex, and timestamp.
+func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) {
+ keys := make([]string, numKeys)
+ vals := make([]*extJSONValue, numKeys)
+
+ if !started {
+ ejp.advanceState()
+ if ejp.s != jpsSawBeginObject {
+ return nil, nil, invalidJSONError("{")
+ }
+ }
+
+ for i := 0; i < numKeys; i++ {
+ key, t, err := ejp.readKey()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ switch ejp.s {
+ case jpsSawKey:
+ v, err := ejp.readValue(t)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ keys[i] = key
+ vals[i] = v
+ case jpsSawValue:
+ keys[i] = key
+ vals[i] = ejp.v
+ default:
+ return nil, nil, invalidJSONError("value")
+ }
+ }
+
+ ejp.advanceState()
+ if ejp.s != jpsSawEndObject {
+ return nil, nil, invalidJSONError("}")
+ }
+
+ return keys, vals, nil
+}
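+
+// exampleReadBinaryBody is a hypothetical sketch (not part of the upstream
+// driver) showing how readObject consumes fixed-shape extended JSON values:
+// the body of {"$binary": {"base64": "AQID", "subType": "00"}} is exactly
+// two key-value pairs.
+func exampleReadBinaryBody(ejp *extJSONParser) error {
+ // started=false tells readObject to consume the opening "{" itself.
+ keys, vals, err := ejp.readObject(2, false)
+ if err != nil {
+ return err
+ }
+ _ = keys // "base64" and "subType", in the order they appeared
+ _ = vals // the corresponding extJSONValue entries
+ return nil
+}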
+
+// advanceState reads the next JSON token from the scanner and transitions
+// from the current state based on that token's type
+func (ejp *extJSONParser) advanceState() {
+ if ejp.s == jpsDoneState || ejp.s == jpsInvalidState {
+ return
+ }
+
+ jt, err := ejp.js.nextToken()
+
+ if err != nil {
+ ejp.err = err
+ ejp.s = jpsInvalidState
+ return
+ }
+
+ valid := ejp.validateToken(jt.t)
+ if !valid {
+ ejp.err = unexpectedTokenError(jt)
+ ejp.s = jpsInvalidState
+ return
+ }
+
+ switch jt.t {
+ case jttBeginObject:
+ ejp.s = jpsSawBeginObject
+ ejp.pushMode(jpmObjectMode)
+ ejp.depth++
+
+ if ejp.depth > ejp.maxDepth {
+ ejp.err = nestingDepthError(jt.p, ejp.depth)
+ ejp.s = jpsInvalidState
+ }
+ case jttEndObject:
+ ejp.s = jpsSawEndObject
+ ejp.depth--
+
+ if ejp.popMode() != jpmObjectMode {
+ ejp.err = unexpectedTokenError(jt)
+ ejp.s = jpsInvalidState
+ }
+ case jttBeginArray:
+ ejp.s = jpsSawBeginArray
+ ejp.pushMode(jpmArrayMode)
+ case jttEndArray:
+ ejp.s = jpsSawEndArray
+
+ if ejp.popMode() != jpmArrayMode {
+ ejp.err = unexpectedTokenError(jt)
+ ejp.s = jpsInvalidState
+ }
+ case jttColon:
+ ejp.s = jpsSawColon
+ case jttComma:
+ ejp.s = jpsSawComma
+ case jttEOF:
+ ejp.s = jpsDoneState
+ if len(ejp.m) != 0 {
+ ejp.err = unexpectedTokenError(jt)
+ ejp.s = jpsInvalidState
+ }
+ case jttString:
+ switch ejp.s {
+ case jpsSawComma:
+ if ejp.peekMode() == jpmArrayMode {
+ ejp.s = jpsSawValue
+ ejp.v = extendJSONToken(jt)
+ return
+ }
+ fallthrough
+ case jpsSawBeginObject:
+ ejp.s = jpsSawKey
+ ejp.k = jt.v.(string)
+ return
+ }
+ fallthrough
+ default:
+ ejp.s = jpsSawValue
+ ejp.v = extendJSONToken(jt)
+ }
+}
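+
+// Illustrative trace (not part of the upstream file): for the input
+// {"a": 1}, successive advanceState calls yield jpsSawBeginObject,
+// jpsSawKey ("a"), jpsSawColon, jpsSawValue (1), jpsSawEndObject, and
+// finally jpsDoneState at EOF.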
+
+var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{
+ jpsStartState: {
+ jttBeginObject: true,
+ jttBeginArray: true,
+ jttInt32: true,
+ jttInt64: true,
+ jttDouble: true,
+ jttString: true,
+ jttBool: true,
+ jttNull: true,
+ jttEOF: true,
+ },
+ jpsSawBeginObject: {
+ jttEndObject: true,
+ jttString: true,
+ },
+ jpsSawEndObject: {
+ jttEndObject: true,
+ jttEndArray: true,
+ jttComma: true,
+ jttEOF: true,
+ },
+ jpsSawBeginArray: {
+ jttBeginObject: true,
+ jttBeginArray: true,
+ jttEndArray: true,
+ jttInt32: true,
+ jttInt64: true,
+ jttDouble: true,
+ jttString: true,
+ jttBool: true,
+ jttNull: true,
+ },
+ jpsSawEndArray: {
+ jttEndObject: true,
+ jttEndArray: true,
+ jttComma: true,
+ jttEOF: true,
+ },
+ jpsSawColon: {
+ jttBeginObject: true,
+ jttBeginArray: true,
+ jttInt32: true,
+ jttInt64: true,
+ jttDouble: true,
+ jttString: true,
+ jttBool: true,
+ jttNull: true,
+ },
+ jpsSawComma: {
+ jttBeginObject: true,
+ jttBeginArray: true,
+ jttInt32: true,
+ jttInt64: true,
+ jttDouble: true,
+ jttString: true,
+ jttBool: true,
+ jttNull: true,
+ },
+ jpsSawKey: {
+ jttColon: true,
+ },
+ jpsSawValue: {
+ jttEndObject: true,
+ jttEndArray: true,
+ jttComma: true,
+ jttEOF: true,
+ },
+ jpsDoneState: {},
+ jpsInvalidState: {},
+}
+
+func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool {
+ switch ejp.s {
+ case jpsSawEndObject:
+ // if we are at depth zero and the next token is a '{',
+ // we can consider it valid only if we are not in array mode.
+ if jtt == jttBeginObject && ejp.depth == 0 {
+ return ejp.peekMode() != jpmArrayMode
+ }
+ case jpsSawComma:
+ switch ejp.peekMode() {
+ // the only valid next token after a comma inside a document is a string (a key)
+ case jpmObjectMode:
+ return jtt == jttString
+ case jpmInvalidMode:
+ return false
+ }
+ }
+
+ _, ok := jpsValidTransitionTokens[ejp.s][jtt]
+ return ok
+}
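+
+// Illustrative examples (not part of the upstream file): after a comma in
+// object mode only a string key may follow, so {"a": 1, 2} is rejected
+// here; after a comma in array mode, any value token passes the generic
+// table lookup above.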
+
+// ensureExtValueType returns true if the current value has the expected
+// value type for single-key extended JSON types. For example, in
+// {"$numberInt": v}, v must be a string (bsontype.String).
+func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool {
+ switch t {
+ case bsontype.MinKey, bsontype.MaxKey:
+ return ejp.v.t == bsontype.Int32
+ case bsontype.Undefined:
+ return ejp.v.t == bsontype.Boolean
+ case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID:
+ return ejp.v.t == bsontype.String
+ default:
+ return false
+ }
+}
+
+func (ejp *extJSONParser) pushMode(m jsonParseMode) {
+ ejp.m = append(ejp.m, m)
+}
+
+func (ejp *extJSONParser) popMode() jsonParseMode {
+ l := len(ejp.m)
+ if l == 0 {
+ return jpmInvalidMode
+ }
+
+ m := ejp.m[l-1]
+ ejp.m = ejp.m[:l-1]
+
+ return m
+}
+
+func (ejp *extJSONParser) peekMode() jsonParseMode {
+ l := len(ejp.m)
+ if l == 0 {
+ return jpmInvalidMode
+ }
+
+ return ejp.m[l-1]
+}
+
+func extendJSONToken(jt *jsonToken) *extJSONValue {
+ var t bsontype.Type
+
+ switch jt.t {
+ case jttInt32:
+ t = bsontype.Int32
+ case jttInt64:
+ t = bsontype.Int64
+ case jttDouble:
+ t = bsontype.Double
+ case jttString:
+ t = bsontype.String
+ case jttBool:
+ t = bsontype.Boolean
+ case jttNull:
+ t = bsontype.Null
+ default:
+ return nil
+ }
+
+ return &extJSONValue{t: t, v: jt.v}
+}
+
+func ensureColon(s jsonParseState, key string) error {
+ if s != jpsSawColon {
+ return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key)
+ }
+
+ return nil
+}
+
+func invalidRequestError(s string) error {
+ return fmt.Errorf("invalid request to read %s", s)
+}
+
+func invalidJSONError(expected string) error {
+ return fmt.Errorf("invalid JSON input; expected %s", expected)
+}
+
+func invalidJSONErrorForType(expected string, t bsontype.Type) error {
+ return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t)
+}
+
+func unexpectedTokenError(jt *jsonToken) error {
+ switch jt.t {
+ case jttInt32, jttInt64, jttDouble:
+ return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p)
+ case jttString:
+ return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p)
+ case jttBool:
+ return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p)
+ case jttNull:
+ return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p)
+ case jttEOF:
+ return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p)
+ default:
+ return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p)
+ }
+}
+
+func nestingDepthError(p, depth int) error {
+ return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p)
+}
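+
+// exampleParseValue is a hypothetical sketch (not part of the upstream
+// driver) of the peek-then-read pattern with which the value reader in
+// extjson_reader.go drives this parser.
+func exampleParseValue(r io.Reader) (*extJSONValue, error) {
+ ejp := newExtJSONParser(r, true) // true selects canonical extended JSON
+ t, err := ejp.peekType() // inspect the BSON type of the next value
+ if err != nil {
+ return nil, err
+ }
+ return ejp.readValue(t) // consume the value as the peeked type
+}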
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
new file mode 100644
index 00000000000..59ddfc44858
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
@@ -0,0 +1,653 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+type ExtJSONValueReaderPool struct {
+ pool sync.Pool
+}
+
+// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool {
+ return &ExtJSONValueReaderPool{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return new(extJSONValueReader)
+ },
+ },
+ }
+}
+
+// Get retrieves a ValueReader from the pool and uses r as the underlying extended JSON source.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) {
+ vr := bvrp.pool.Get().(*extJSONValueReader)
+ return vr.reset(r, canonical)
+}
+
+// Put inserts a ValueReader into the pool. If the ValueReader is not an ExtJSON ValueReader,
+// nothing is inserted into the pool and ok will be false.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) {
+ bvr, ok := vr.(*extJSONValueReader)
+ if !ok {
+ return false
+ }
+
+ bvr, _ = bvr.reset(nil, false)
+ bvrp.pool.Put(bvr)
+ return true
+}
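+
+// examplePoolRoundTrip is a hypothetical sketch (not part of the upstream
+// driver) of the deprecated pool's get/use/put cycle.
+func examplePoolRoundTrip(r io.Reader) error {
+ pool := NewExtJSONValueReaderPool()
+ vr, err := pool.Get(r, false) // false selects relaxed extended JSON
+ if err != nil {
+ return err
+ }
+ defer pool.Put(vr) // hand the reader back for reuse
+ _, err = vr.ReadDocument()
+ return err
+}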
+
+type ejvrState struct {
+ mode mode
+ vType bsontype.Type
+ depth int
+}
+
+// extJSONValueReader is for reading extended JSON.
+type extJSONValueReader struct {
+ p *extJSONParser
+
+ stack []ejvrState
+ frame int
+}
+
+// NewExtJSONValueReader creates a new ValueReader from a given io.Reader.
+// It will interpret the JSON of r as canonical or relaxed according to the
+// given canonical flag.
+func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) {
+ return newExtJSONValueReader(r, canonical)
+}
+
+func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+ ejvr := new(extJSONValueReader)
+ return ejvr.reset(r, canonical)
+}
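+
+// exampleReadWrappedInt32 is a hypothetical sketch (not part of the upstream
+// driver): decoding {"x": {"$numberInt": "1"}} step by step through the
+// reader returned by NewExtJSONValueReader.
+func exampleReadWrappedInt32(r io.Reader) (int32, error) {
+ vr, err := NewExtJSONValueReader(r, true)
+ if err != nil {
+ return 0, err
+ }
+ dr, err := vr.ReadDocument()
+ if err != nil {
+ return 0, err
+ }
+ _, evr, err := dr.ReadElement() // positions the reader on the "x" element
+ if err != nil {
+ return 0, err
+ }
+ return evr.ReadInt32() // unwraps the {"$numberInt": ...} form
+}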
+
+func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+ p := newExtJSONParser(r, canonical)
+ typ, err := p.peekType()
+
+ if err != nil {
+ return nil, ErrInvalidJSON
+ }
+
+ var m mode
+ switch typ {
+ case bsontype.EmbeddedDocument:
+ m = mTopLevel
+ case bsontype.Array:
+ m = mArray
+ default:
+ m = mValue
+ }
+
+ stack := make([]ejvrState, 1, 5)
+ stack[0] = ejvrState{
+ mode: m,
+ vType: typ,
+ }
+ return &extJSONValueReader{
+ p: p,
+ stack: stack,
+ }, nil
+}
+
+func (ejvr *extJSONValueReader) advanceFrame() {
+ if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack
+ length := len(ejvr.stack)
+ if length+1 >= cap(ejvr.stack) {
+ // double it
+ buf := make([]ejvrState, 2*cap(ejvr.stack)+1)
+ copy(buf, ejvr.stack)
+ ejvr.stack = buf
+ }
+ ejvr.stack = ejvr.stack[:length+1]
+ }
+ ejvr.frame++
+
+ // Clean the stack
+ ejvr.stack[ejvr.frame].mode = 0
+ ejvr.stack[ejvr.frame].vType = 0
+ ejvr.stack[ejvr.frame].depth = 0
+}
+
+func (ejvr *extJSONValueReader) pushDocument() {
+ ejvr.advanceFrame()
+
+ ejvr.stack[ejvr.frame].mode = mDocument
+ ejvr.stack[ejvr.frame].depth = ejvr.p.depth
+}
+
+func (ejvr *extJSONValueReader) pushCodeWithScope() {
+ ejvr.advanceFrame()
+
+ ejvr.stack[ejvr.frame].mode = mCodeWithScope
+}
+
+func (ejvr *extJSONValueReader) pushArray() {
+ ejvr.advanceFrame()
+
+ ejvr.stack[ejvr.frame].mode = mArray
+}
+
+func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) {
+ ejvr.advanceFrame()
+
+ ejvr.stack[ejvr.frame].mode = m
+ ejvr.stack[ejvr.frame].vType = t
+}
+
+func (ejvr *extJSONValueReader) pop() {
+ switch ejvr.stack[ejvr.frame].mode {
+ case mElement, mValue:
+ ejvr.frame--
+ case mDocument, mArray, mCodeWithScope:
+ ejvr.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
+ }
+}
+
+func (ejvr *extJSONValueReader) skipObject() {
+ // read entire object until depth returns to 0 (last ending } or ] seen)
+ depth := 1
+ for depth > 0 {
+ ejvr.p.advanceState()
+
+ // If object is empty, decrement depth and continue. When emptyObject is true, the
+ // parser has already read both the opening and closing brackets of an empty
+ // object ("{}"), so the next valid token will be part of the parent document,
+ // not part of the nested document.
+ //
+ // If there is a comma, there are remaining fields, emptyObject must be set back
+ // to false, and comma must be skipped with advanceState().
+ if ejvr.p.emptyObject {
+ if ejvr.p.s == jpsSawComma {
+ ejvr.p.emptyObject = false
+ ejvr.p.advanceState()
+ }
+ depth--
+ continue
+ }
+
+ switch ejvr.p.s {
+ case jpsSawBeginObject, jpsSawBeginArray:
+ depth++
+ case jpsSawEndObject, jpsSawEndArray:
+ depth--
+ }
+ }
+}
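+
+// Illustrative trace (not part of the upstream file): skipping the already
+// opened document {"a": [1, 2]} steps through "a", ":", "[" (depth 2), 1,
+// ",", 2, "]" (depth 1), and the closing "}" (depth 0), ending the loop.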
+
+func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
+ te := TransitionError{
+ name: name,
+ current: ejvr.stack[ejvr.frame].mode,
+ destination: destination,
+ modes: modes,
+ action: "read",
+ }
+ if ejvr.frame != 0 {
+ te.parent = ejvr.stack[ejvr.frame-1].mode
+ }
+ return te
+}
+
+func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error {
+ return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t)
+}
+
+func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error {
+ switch ejvr.stack[ejvr.frame].mode {
+ case mElement, mValue:
+ if ejvr.stack[ejvr.frame].vType != t {
+ return ejvr.typeError(t)
+ }
+ default:
+ modes := []mode{mElement, mValue}
+ if addModes != nil {
+ modes = append(modes, addModes...)
+ }
+ return ejvr.invalidTransitionErr(destination, callerName, modes)
+ }
+
+ return nil
+}
+
+func (ejvr *extJSONValueReader) Type() bsontype.Type {
+ return ejvr.stack[ejvr.frame].vType
+}
+
+func (ejvr *extJSONValueReader) Skip() error {
+ switch ejvr.stack[ejvr.frame].mode {
+ case mElement, mValue:
+ default:
+ return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
+ }
+
+ defer ejvr.pop()
+
+ t := ejvr.stack[ejvr.frame].vType
+ switch t {
+ case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
+ // read entire array, doc or CodeWithScope
+ ejvr.skipObject()
+ default:
+ _, err := ejvr.p.readValue(t)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) {
+ switch ejvr.stack[ejvr.frame].mode {
+ case mTopLevel: // allow reading array from top level
+ case mArray:
+ return ejvr, nil
+ default:
+ if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil {
+ return nil, err
+ }
+ }
+
+ ejvr.pushArray()
+
+ return ejvr, nil
+}
+
+func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) {
+ if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
+ return nil, 0, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Binary)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ b, btype, err = v.parseBinary()
+
+ ejvr.pop()
+ return b, btype, err
+}
+
+func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) {
+ if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
+ return false, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Boolean)
+ if err != nil {
+ return false, err
+ }
+
+ if v.t != bsontype.Boolean {
+ return false, fmt.Errorf("expected type bool, but got type %s", v.t)
+ }
+
+ ejvr.pop()
+ return v.v.(bool), nil
+}
+
+func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) {
+ switch ejvr.stack[ejvr.frame].mode {
+ case mTopLevel:
+ return ejvr, nil
+ case mElement, mValue:
+ if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument {
+ return nil, ejvr.typeError(bsontype.EmbeddedDocument)
+ }
+
+ ejvr.pushDocument()
+ return ejvr, nil
+ default:
+ return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
+ }
+}
+
+func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
+ if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
+ return "", nil, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.CodeWithScope)
+ if err != nil {
+ return "", nil, err
+ }
+
+ code, err = v.parseJavascript()
+
+ ejvr.pushCodeWithScope()
+ return code, ejvr, err
+}
+
+func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
+ if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
+ return "", primitive.NilObjectID, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.DBPointer)
+ if err != nil {
+ return "", primitive.NilObjectID, err
+ }
+
+ ns, oid, err = v.parseDBPointer()
+
+ ejvr.pop()
+ return ns, oid, err
+}
+
+func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) {
+ if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
+ return 0, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.DateTime)
+ if err != nil {
+ return 0, err
+ }
+
+ d, err := v.parseDateTime()
+
+ ejvr.pop()
+ return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) {
+ if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
+ return primitive.Decimal128{}, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Decimal128)
+ if err != nil {
+ return primitive.Decimal128{}, err
+ }
+
+ d, err := v.parseDecimal128()
+
+ ejvr.pop()
+ return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadDouble() (float64, error) {
+ if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
+ return 0, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Double)
+ if err != nil {
+ return 0, err
+ }
+
+ d, err := v.parseDouble()
+
+ ejvr.pop()
+ return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadInt32() (int32, error) {
+ if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
+ return 0, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Int32)
+ if err != nil {
+ return 0, err
+ }
+
+ i, err := v.parseInt32()
+
+ ejvr.pop()
+ return i, err
+}
+
+func (ejvr *extJSONValueReader) ReadInt64() (int64, error) {
+ if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
+ return 0, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Int64)
+ if err != nil {
+ return 0, err
+ }
+
+ i, err := v.parseInt64()
+
+ ejvr.pop()
+ return i, err
+}
+
+func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) {
+ if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
+ return "", err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.JavaScript)
+ if err != nil {
+ return "", err
+ }
+
+ code, err = v.parseJavascript()
+
+ ejvr.pop()
+ return code, err
+}
+
+func (ejvr *extJSONValueReader) ReadMaxKey() error {
+ if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
+ return err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.MaxKey)
+ if err != nil {
+ return err
+ }
+
+ err = v.parseMinMaxKey("max")
+
+ ejvr.pop()
+ return err
+}
+
+func (ejvr *extJSONValueReader) ReadMinKey() error {
+ if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
+ return err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.MinKey)
+ if err != nil {
+ return err
+ }
+
+ err = v.parseMinMaxKey("min")
+
+ ejvr.pop()
+ return err
+}
+
+func (ejvr *extJSONValueReader) ReadNull() error {
+ if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
+ return err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Null)
+ if err != nil {
+ return err
+ }
+
+ if v.t != bsontype.Null {
+ return fmt.Errorf("expected type null but got type %s", v.t)
+ }
+
+ ejvr.pop()
+ return nil
+}
+
+func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) {
+ if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
+ return primitive.ObjectID{}, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.ObjectID)
+ if err != nil {
+ return primitive.ObjectID{}, err
+ }
+
+ oid, err := v.parseObjectID()
+
+ ejvr.pop()
+ return oid, err
+}
+
+func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) {
+ if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
+ return "", "", err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Regex)
+ if err != nil {
+ return "", "", err
+ }
+
+ pattern, options, err = v.parseRegex()
+
+ ejvr.pop()
+ return pattern, options, err
+}
+
+func (ejvr *extJSONValueReader) ReadString() (string, error) {
+ if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
+ return "", err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.String)
+ if err != nil {
+ return "", err
+ }
+
+ if v.t != bsontype.String {
+ return "", fmt.Errorf("expected type string but got type %s", v.t)
+ }
+
+ ejvr.pop()
+ return v.v.(string), nil
+}
+
+func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) {
+ if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
+ return "", err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Symbol)
+ if err != nil {
+ return "", err
+ }
+
+ symbol, err = v.parseSymbol()
+
+ ejvr.pop()
+ return symbol, err
+}
+
+func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) {
+ if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
+ return 0, 0, err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Timestamp)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ t, i, err = v.parseTimestamp()
+
+ ejvr.pop()
+ return t, i, err
+}
+
+func (ejvr *extJSONValueReader) ReadUndefined() error {
+ if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
+ return err
+ }
+
+ v, err := ejvr.p.readValue(bsontype.Undefined)
+ if err != nil {
+ return err
+ }
+
+ err = v.parseUndefined()
+
+ ejvr.pop()
+ return err
+}
+
+func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) {
+ switch ejvr.stack[ejvr.frame].mode {
+ case mTopLevel, mDocument, mCodeWithScope:
+ default:
+ return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
+ }
+
+ name, t, err := ejvr.p.readKey()
+
+ if err != nil {
+ if errors.Is(err, ErrEOD) {
+ if ejvr.stack[ejvr.frame].mode == mCodeWithScope {
+ _, err := ejvr.p.peekType()
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ ejvr.pop()
+ }
+
+ return "", nil, err
+ }
+
+ ejvr.push(mElement, t)
+ return name, ejvr, nil
+}
+
+func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) {
+ switch ejvr.stack[ejvr.frame].mode {
+ case mArray:
+ default:
+ return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
+ }
+
+ t, err := ejvr.p.peekType()
+ if err != nil {
+ if errors.Is(err, ErrEOA) {
+ ejvr.pop()
+ }
+
+ return nil, err
+ }
+
+ ejvr.push(mValue, t)
+ return ejvr, nil
+}
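+
+// exampleSkipAllElements is a hypothetical sketch (not part of the upstream
+// driver) of the ReadElement loop and its ErrEOD termination condition.
+func exampleSkipAllElements(vr ValueReader) error {
+ dr, err := vr.ReadDocument()
+ if err != nil {
+ return err
+ }
+ for {
+ _, evr, err := dr.ReadElement()
+ if errors.Is(err, ErrEOD) {
+ return nil // the document has no more elements
+ }
+ if err != nil {
+ return err
+ }
+ if err := evr.Skip(); err != nil { // discard the value without decoding
+ return err
+ }
+ }
+}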
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
new file mode 100644
index 00000000000..ba39c9601fb
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
@@ -0,0 +1,223 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/golang/go by The Go Authors
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bsonrw
+
+import "unicode/utf8"
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+ ' ': true,
+ '!': true,
+ '"': false,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '(': true,
+ ')': true,
+ '*': true,
+ '+': true,
+ ',': true,
+ '-': true,
+ '.': true,
+ '/': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ ':': true,
+ ';': true,
+ '<': true,
+ '=': true,
+ '>': true,
+ '?': true,
+ '@': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '[': true,
+ '\\': false,
+ ']': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '{': true,
+ '|': true,
+ '}': true,
+ '~': true,
+ '\u007f': true,
+}
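+
+// exampleNeedsEscape is a hypothetical sketch (not part of the upstream
+// driver): a JSON string writer escapes any byte that is non-ASCII or not
+// marked safe in safeSet.
+func exampleNeedsEscape(b byte) bool {
+ return b >= utf8.RuneSelf || !safeSet[b]
+}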
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML